diff --git a/NuGet.config b/NuGet.config index 3b6333eeb72..f951f89147f 100644 --- a/NuGet.config +++ b/NuGet.config @@ -1,7 +1,7 @@ - + - - + + \ No newline at end of file diff --git a/build/scripts/Releasing.fsx b/build/scripts/Releasing.fsx index bbce0399b81..50b468bb8a0 100644 --- a/build/scripts/Releasing.fsx +++ b/build/scripts/Releasing.fsx @@ -1,7 +1,5 @@ #I @"../../packages/build/FAKE/tools" -#I @"../../packages/build/FSharp.Data/lib/net40" #r @"FakeLib.dll" -#r @"FSharp.Data.dll" #load @"Paths.fsx" #load @"Projects.fsx" #load @"Versioning.fsx" @@ -15,9 +13,6 @@ open Versioning open Building open FSharp.Data -// TODO: Use a complete project.json skeleton -type ProjectJson = JsonProvider<"../../src/Nest/project.json"> - type Release() = static let nugetPack = fun (projectName: ProjectName) -> let name = projectName.Nuget; @@ -40,36 +35,6 @@ type Release() = traceFAKE "%s" dir MoveFile Paths.NugetOutput nugetOutFile - static let updateVersion project = - CreateDir Paths.NugetOutput - use file = File.Open (project, FileMode.Open) - let doc = ProjectJson.Load file - - let newDoc = ProjectJson.Root( - doc.Authors, - doc.Owners, - doc.ProjectUrl, - doc.LicenseUrl, - doc.RequireLicenseAcceptance, - doc.IconUrl, - doc.Summary, - doc.Description, - doc.Title, - doc.Tags, - doc.Repository, - doc.Copyright, - Versioning.FileVersion, - doc.CompilationOptions, - doc.Configurations, - doc.Dependencies, - doc.Commands, - doc.Frameworks) - - file.Close () - File.Delete project - use writer = new StreamWriter(File.Open (project, FileMode.Create)) - newDoc.JsonValue.WriteTo(writer, JsonSaveOptions.None) - static member PackAll() = DotNetProject.All |> Seq.map (fun p -> p.ProjectName) @@ -80,7 +45,7 @@ type Release() = ++ "src/Elasticsearch.Net/project.json" // update versions - projects |> Seq.iter updateVersion + Versioning.PatchProjectJsons() // build nuget packages projects @@ -117,7 +82,6 @@ type Release() = System.Text.Encoding.UTF8 nuspec - // Include PDB for each 
target framework let frameworkDirs = (sprintf "%s/lib" unzippedDir |> directoryInfo).GetDirectories() for frameworkDir in frameworkDirs do @@ -132,6 +96,8 @@ type Release() = ZipHelper.Zip unzippedDir package !!(sprintf "%s/**/*.*" unzippedDir) DeleteDir unzippedDir + if (directoryExists Paths.NugetOutput = false) then CreateDir Paths.NugetOutput + // move to nuget output MoveFile Paths.NugetOutput package ) @@ -143,4 +109,13 @@ type Release() = match success with | 0 -> traceFAKE "publish to myget succeeded" |> ignore | _ -> failwith "publish to myget failed" |> ignore + ) + + static member PatchReleaseNotes() = + !! "src/**/project.json" + |> Seq.iter(fun f -> + RegexReplaceInFileWithEncoding + "\"releaseNotes\"\\s?:\\s?\".*\"" + (sprintf "\"releaseNotes\": \"See https://github.com/elastic/elasticsearch-net/releases/tag/%s\"" Versioning.FileVersion) + (new System.Text.UTF8Encoding(false)) f ) \ No newline at end of file diff --git a/build/scripts/Targets.fsx b/build/scripts/Targets.fsx index e4d5d5a2588..5cc75bde088 100644 --- a/build/scripts/Targets.fsx +++ b/build/scripts/Targets.fsx @@ -66,7 +66,8 @@ Target "Version" <| fun _ -> Versioning.PatchProjectJsons() Target "Release" <| fun _ -> - Release.PackAllDnx() + Release.PatchReleaseNotes() + Release.PackAllDnx() Sign.ValidateNugetDllAreSignedCorrectly() Versioning.ValidateArtifacts() diff --git a/docs/asciidoc/Aggregations/Bucket/Children/ChildrenAggregationMapping.doc.asciidoc b/docs/asciidoc/Aggregations/Bucket/Children/ChildrenAggregationMapping.doc.asciidoc deleted file mode 100644 index e03f8eb564a..00000000000 --- a/docs/asciidoc/Aggregations/Bucket/Children/ChildrenAggregationMapping.doc.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -To use the child aggregation you have to make sure -a `_parent` mapping is in place, here we create the project -index with two mapped types, `project` and `commitactivity` and -we add a `_parent` mapping from `commitactivity` to `parent` - -[source, csharp] ----- -var 
createProjectIndex = TestClient.GetClient().CreateIndex(typeof(Project), c => c - .Mappings(map=>map - .Map(m=>m.AutoMap()) - .Map(m=>m - .Parent() - ) - ) -); ----- diff --git a/docs/asciidoc/Aggregations/WritingAggregations.doc.asciidoc b/docs/asciidoc/Aggregations/WritingAggregations.doc.asciidoc deleted file mode 100644 index 7f2a8739c7d..00000000000 --- a/docs/asciidoc/Aggregations/WritingAggregations.doc.asciidoc +++ /dev/null @@ -1,78 +0,0 @@ -Aggregations are arguably one of the most powerful features of Elasticsearch. -NEST allows you to write your aggregations using a strict fluent dsl, a verbatim object initializer -syntax that maps verbatim to the elasticsearch API -a more terse object initializer aggregation DSL. - -Three different ways, yikes thats a lot to take in! Lets go over them one by one and explain when you might -want to use which one. - -The fluent lambda syntax is the most terse way to write aggregations. -It benefits from types that are carried over to sub aggregations - -[source, csharp] ----- -s => s -.Aggregations(aggs => aggs - .Children("name_of_child_agg", child => child - .Aggregations(childAggs => childAggs - .Average("average_per_child", avg => avg.Field(p => p.ConfidenceFactor)) - .Max("max_per_child", avg => avg.Field(p => p.ConfidenceFactor)) - ) - ) -) ----- -The object initializer syntax (OIS) is a one-to-one mapping with how aggregations -have to be represented in the Elasticsearch API. While it has the benefit of being a one-to-one -mapping, being dictionary based in C# means it can grow exponentially in complexity rather quickly. 
- -[source, csharp] ----- -new SearchRequest -{ - Aggregations = new ChildrenAggregation("name_of_child_agg", typeof(CommitActivity)) - { - Aggregations = - new AverageAggregation("average_per_child", "confidenceFactor") - && new MaxAggregation("max_per_child", "confidenceFactor") - } -} ----- -For this reason the OIS syntax can be shortened dramatically by using `*Agg` related family, -These allow you to forego introducing intermediary Dictionaries to represent the aggregation DSL. -It also allows you to combine multiple aggregations using bitwise AND (` -`) operator. - -Compare the following example with the previous vanilla OIS syntax - -[source, csharp] ----- -new SearchRequest -{ - Aggregations = new ChildrenAggregation("name_of_child_agg", typeof(CommitActivity)) - { - Aggregations = - new AverageAggregation("average_per_child", Field(p => p.ConfidenceFactor)) - && new MaxAggregation("max_per_child", Field(p => p.ConfidenceFactor)) - } -} ----- -An advanced scenario may involve an existing collection of aggregation functions that should be set as aggregations -on the request. Using LINQ's `.Aggregate()` method, each function can be applied to the aggregation descriptor -(`childAggs` below) in turn, returning the descriptor after each function application. 
- -[source, csharp] ----- -var aggregations = new List, IAggregationContainer>> -{ - a => a.Average("average_per_child", avg => avg.Field(p => p.ConfidenceFactor)), - a => a.Max("max_per_child", avg => avg.Field(p => p.ConfidenceFactor)) -}; -return s => s - .Aggregations(aggs => aggs - .Children("name_of_child_agg", child => child - .Aggregations(childAggs => - aggregations.Aggregate(childAggs, (acc, agg) => { agg(acc); return acc; }) - ) - ) - ); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/ConnectionPooling.Doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/ConnectionPooling.Doc.asciidoc deleted file mode 100644 index 8d4ec9d1de7..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/ConnectionPooling.Doc.asciidoc +++ /dev/null @@ -1,161 +0,0 @@ -= Connection Pooling -Connection pooling is the internal mechanism that takes care of registering what nodes there are in the cluster and which -we can use to issue client calls on. - - -== SingleNodeConnectionPool -The simplest of all connection pools, this takes a single `Uri` and uses that to connect to elasticsearch for all the calls -It doesn't opt in to sniffing and pinging behavior, and will never mark nodes dead or alive. The one `Uri` it holds is always -ready to go. 
- - -[source, csharp] ----- -var uri = new Uri("http://localhost:9201"); -var pool = new SingleNodeConnectionPool(uri); -pool.Nodes.Should().HaveCount(1); -var node = pool.Nodes.First(); -node.Uri.Port.Should().Be(9201); ----- -This type of pool is hardwired to opt out of sniffing - -[source, csharp] ----- -pool.SupportsReseeding.Should().BeFalse(); ----- -and pinging - -[source, csharp] ----- -pool.SupportsPinging.Should().BeFalse(); ----- -When you use the low ceremony ElasticClient constructor that takes a single Uri, -We default to this SingleNodeConnectionPool - -[source, csharp] ----- -var client = new ElasticClient(uri); ----- -[source, csharp] ----- -client.ConnectionSettings.ConnectionPool.Should().BeOfType(); ----- -However we urge that you always pass your connection settings explicitly - -[source, csharp] ----- -client = new ElasticClient(new ConnectionSettings(uri)); ----- -[source, csharp] ----- -client.ConnectionSettings.ConnectionPool.Should().BeOfType(); ----- -or even better pass the connection pool explicitly - -[source, csharp] ----- -client = new ElasticClient(new ConnectionSettings(pool)); ----- -[source, csharp] ----- -client.ConnectionSettings.ConnectionPool.Should().BeOfType(); ----- -== StaticConnectionPool -The static connection pool is great if you have a known small sized cluster and do no want to enable -sniffing to find out the cluster topology. 
- - -[source, csharp] ----- -var uris = Enumerable.Range(9200, 5).Select(p => new Uri("http://localhost:" + p)); ----- -a connection pool can be seeded using an enumerable of `Uri`s - -[source, csharp] ----- -var pool = new StaticConnectionPool(uris); ----- -Or using an enumerable of `Node` - -[source, csharp] ----- -var nodes = uris.Select(u=>new Node(u)); ----- -[source, csharp] ----- -pool = new StaticConnectionPool(nodes); ----- -This type of pool is hardwired to opt out of sniffing - -[source, csharp] ----- -pool.SupportsReseeding.Should().BeFalse(); ----- -but supports pinging when enabled - -[source, csharp] ----- -pool.SupportsPinging.Should().BeTrue(); ----- -To create a client using this static connection pool pass -the connection pool to the connectionsettings you pass to ElasticClient - -[source, csharp] ----- -var client = new ElasticClient(new ConnectionSettings(pool)); ----- -[source, csharp] ----- -client.ConnectionSettings.ConnectionPool.Should().BeOfType(); ----- -== SniffingConnectionPool -A subclass of StaticConnectionPool that allows itself to be reseeded at run time. -It comes with a very minor overhead of a `ReaderWriterLockSlim` to ensure thread safety. - - -[source, csharp] ----- -var uris = Enumerable.Range(9200, 5).Select(p => new Uri("http://localhost:" + p)); ----- -a connection pool can be seeded using an enumerable of `Uri` - -[source, csharp] ----- -var pool = new SniffingConnectionPool(uris); ----- -Or using an enumerable of `Node` -A major benefit here is you can include known node roles when seeding -NEST can use this information to favour sniffing on master eligible nodes first -and take master only nodes out of rotation for issuing client calls on. 
- -[source, csharp] ----- -var nodes = uris.Select(u=>new Node(u)); ----- -[source, csharp] ----- -pool = new SniffingConnectionPool(nodes); ----- -This type of pool is hardwired to opt in to sniffing - -[source, csharp] ----- -pool.SupportsReseeding.Should().BeTrue(); ----- -and pinging - -[source, csharp] ----- -pool.SupportsPinging.Should().BeTrue(); ----- -To create a client using the sniffing connection pool pass -the connection pool to the connectionsettings you pass to ElasticClient - -[source, csharp] ----- -var client = new ElasticClient(new ConnectionSettings(pool)); ----- -[source, csharp] ----- -client.ConnectionSettings.ConnectionPool.Should().BeOfType(); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/Transports.Doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/Transports.Doc.asciidoc deleted file mode 100644 index 359ea30aefc..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/Transports.Doc.asciidoc +++ /dev/null @@ -1,39 +0,0 @@ -= Transports - -The `ITransport` interface can be seen as the motor block of the client. It's interface is deceitfully simple. -It's ultimately responsible from translating a client call to a response. If for some reason you do not agree with the way we wrote -the internals of the client, by implementing a custom `ITransport`, you can circumvent all of it and introduce your own. - - - -Transport is generically typed to a type that implements IConnectionConfigurationValues -This is the minimum ITransport needs to report back for the client to function. 
-e.g in the low level client, transport is instantiated like this: - -[source, csharp] ----- -var lowLevelTransport = new Transport(new ConnectionConfiguration()); ----- -In the high level client like this: - -[source, csharp] ----- -var highlevelTransport = new Transport(new ConnectionSettings()); ----- -[source, csharp] ----- -var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200")); -var inMemoryTransport = new Transport(new ConnectionSettings(connectionPool, new InMemoryConnection())); ----- -The only two methods on `ITransport` are `Request()` and `RequestAsync()`, the default `ITransport` implementation is responsible for introducing -many of the building blocks in the client, if these do not work for you can swap them out for your own custom `ITransport` implementation. -If you feel this need, please let us know as we'd love to learn why you've go down this route! - -[source, csharp] ----- -var response = inMemoryTransport.Request>(HttpMethod.GET, "/_search", new { query = new { match_all = new { } } }); ----- -[source, csharp] ----- -response = await inMemoryTransport.RequestAsync>(HttpMethod.GET, "/_search", new { query = new { match_all = new { } } }); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnexpectedExceptions.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnexpectedExceptions.doc.asciidoc deleted file mode 100644 index 1dc333afec6..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnexpectedExceptions.doc.asciidoc +++ /dev/null @@ -1,114 +0,0 @@ -== Unexpected exceptions -When a client call throws an exception that the IConnction can not handle, this exception will bubble -out the client as an UnexpectedElasticsearchClientException, regardless whether the client is configured to throw or not. -An IConnection is in charge of knowning what exceptions it can recover from or not. 
The default IConnection that is based on WebRequest can and -will recover from WebExceptions but others will be grounds for immediately exiting the pipeline. - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.SucceedAlways()) - .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!"))) - .StaticConnectionPool() - .Settings(s => s.DisablePing()) - ); -audit = await audit.TraceCall( - new ClientCall { - { AuditEvent.HealthyResponse, 9200 }, - } - ); -audit = await audit.TraceUnexpectedException( - new ClientCall { - { AuditEvent.BadResponse, 9201 }, - }, - (e) => - { - e.FailureReason.Should().Be(PipelineFailure.Unexpected); - e.InnerException.Should().NotBeNull(); - e.InnerException.Message.Should().Be("boom!"); - } - ); -e.FailureReason.Should().Be(PipelineFailure.Unexpected); -e.InnerException.Should().NotBeNull(); -e.InnerException.Message.Should().Be("boom!"); ----- - -Sometimes an unexpected exception happens further down in the pipeline, this is why we -wrap them inside an UnexpectedElasticsearchClientException so that information about where -in the pipeline the unexpected exception is not lost, here a call to 9200 fails using a webexception. -It then falls over to 9201 which throws an hard exception from within IConnection. We assert that we -can still see the audit trail for the whole coordinated request. 
- - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) -#if DOTNETCORE - .ClientCalls(r => r.OnPort(9200).FailAlways(new System.Net.Http.HttpRequestException("recover"))) -#else - .ClientCalls(r => r.OnPort(9200).FailAlways(new WebException("recover"))) -#endif - .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!"))) - .StaticConnectionPool() - .Settings(s => s.DisablePing()) - ); -audit = await audit.TraceUnexpectedException( - new ClientCall { - { AuditEvent.BadResponse, 9200 }, - { AuditEvent.BadResponse, 9201 }, - }, - (e) => - { - e.FailureReason.Should().Be(PipelineFailure.Unexpected); - e.InnerException.Should().NotBeNull(); - e.InnerException.Message.Should().Be("boom!"); - } - ); -e.FailureReason.Should().Be(PipelineFailure.Unexpected); -e.InnerException.Should().NotBeNull(); -e.InnerException.Message.Should().Be("boom!"); ----- - -An unexpected hard exception on ping and sniff is something we *do* try to revover from and failover. -Here pinging nodes on first use is enabled and 9200 throws on ping, we still fallover to 9201's ping succeeds. 
-However the client call on 9201 throws a hard exception we can not recover from - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .Ping(r => r.OnPort(9200).FailAlways(new Exception("ping exception"))) - .Ping(r => r.OnPort(9201).SucceedAlways()) - .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!"))) - .StaticConnectionPool() - .AllDefaults() - ); ----- -[source, csharp] ----- -audit = await audit.TraceUnexpectedException( - new ClientCall { - { AuditEvent.PingFailure, 9200 }, - { AuditEvent.PingSuccess, 9201 }, - { AuditEvent.BadResponse, 9201 }, - }, - (e) => - { - e.FailureReason.Should().Be(PipelineFailure.Unexpected); -e.InnerException.Should().NotBeNull(); - e.InnerException.Message.Should().Be("boom!"); -e.SeenExceptions.Should().NotBeEmpty(); - var pipelineException = e.SeenExceptions.First(); - pipelineException.FailureReason.Should().Be(PipelineFailure.PingFailure); - pipelineException.InnerException.Message.Should().Be("ping exception"); -var pingException = e.AuditTrail.First(a => a.Event == AuditEvent.PingFailure).Exception; - pingException.Should().NotBeNull(); - pingException.Message.Should().Be("ping exception"); - - } -); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnrecoverableExceptions.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnrecoverableExceptions.doc.asciidoc deleted file mode 100644 index 7a2c0e52b85..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Exceptions/UnrecoverableExceptions.doc.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -== Unrecoverable exceptions -Unrecoverable exceptions are excepted exceptions that are grounds to exit the client pipeline immediately. -By default the client won't throw on any ElasticsearchClientException but return an invalid response. -You can configure the client to throw using ThrowExceptions() on ConnectionSettings. 
The following test -both a client that throws and one that returns an invalid response with an `.OriginalException` exposed - - -[source, csharp] ----- -var recoverablExceptions = new[] - { - new PipelineException(PipelineFailure.BadResponse), - new PipelineException(PipelineFailure.PingFailure), - }; -recoverablExceptions.Should().OnlyContain(e => e.Recoverable); -var unrecoverableExceptions = new[] - { - new PipelineException(PipelineFailure.CouldNotStartSniffOnStartup), - new PipelineException(PipelineFailure.SniffFailure), - new PipelineException(PipelineFailure.Unexpected), - new PipelineException(PipelineFailure.BadAuthentication), - new PipelineException(PipelineFailure.MaxRetriesReached), - new PipelineException(PipelineFailure.MaxTimeoutReached) - }; -unrecoverableExceptions.Should().OnlyContain(e => !e.Recoverable); -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .Ping(r => r.SucceedAlways()) - .ClientCalls(r => r.FailAlways(401)) - .StaticConnectionPool() - .AllDefaults() - ); -audit = await audit.TraceElasticsearchException( - new ClientCall { - { AuditEvent.PingSuccess, 9200 }, - { AuditEvent.BadResponse, 9200 }, - }, - (e) => - { - e.FailureReason.Should().Be(PipelineFailure.BadAuthentication); - } - ); -e.FailureReason.Should().Be(PipelineFailure.BadAuthentication); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Failover/FallingOver.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Failover/FallingOver.doc.asciidoc deleted file mode 100644 index f83117ff4d0..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Failover/FallingOver.doc.asciidoc +++ /dev/null @@ -1,80 +0,0 @@ -== Fail over -When using connection pooling and the pool has sufficient nodes a request will be retried if -the call to a node throws an exception or returns a 502 or 503 - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways()) - .ClientCalls(r => 
r.OnPort(9201).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing()) - ); -audit = await audit.TraceCall( - new ClientCall { - { BadResponse, 9200 }, - { HealthyResponse, 9201 }, - } - ); ----- -502 Bad Gateway -Will be treated as an error that requires retrying - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways(502)) - .ClientCalls(r => r.OnPort(9201).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing()) - ); -audit = await audit.TraceCall( - new ClientCall { - { BadResponse, 9200 }, - { HealthyResponse, 9201 }, - } - ); ----- -503 Service Unavailable -Will be treated as an error that requires retrying - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways(503)) - .ClientCalls(r => r.OnPort(9201).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing()) - ); -audit = await audit.TraceCall( - new ClientCall { - { BadResponse, 9200 }, - { HealthyResponse, 9201 }, - } - ); ----- - -If a call returns a valid http status code other then 502/503 the request won't be retried. - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways(418)) - .ClientCalls(r => r.OnPort(9201).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing()) - ); -audit = await audit.TraceCall( - new ClientCall { - { BadResponse, 9200 }, - } - ); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/MaxRetries/RespectsMaxRetry.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/MaxRetries/RespectsMaxRetry.doc.asciidoc deleted file mode 100644 index 2aa8b032712..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/MaxRetries/RespectsMaxRetry.doc.asciidoc +++ /dev/null @@ -1,146 +0,0 @@ -== MaxRetries -By default retry as many times as we have nodes. 
However retries still respect the request timeout. -Meaning if you have a 100 node cluster and a request timeout of 20 seconds we will retry as many times as we can -but give up after 20 seconds - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways()) - .ClientCalls(r => r.OnPort(9209).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing()) - ); -audit = await audit.TraceCall( - new ClientCall { - { BadResponse, 9200 }, - { BadResponse, 9201 }, - { BadResponse, 9202 }, - { BadResponse, 9203 }, - { BadResponse, 9204 }, - { BadResponse, 9205 }, - { BadResponse, 9206 }, - { BadResponse, 9207 }, - { BadResponse, 9208 }, - { HealthyResponse, 9209 } - } - ); ----- - -When you have a 100 node cluster you might want to ensure a fixed number of retries. -Remember that the actual number of requests is initial attempt + set number of retries - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways()) - .ClientCalls(r => r.OnPort(9209).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing().MaximumRetries(3)) - ); -audit = await audit.TraceCall( - new ClientCall { - { BadResponse, 9200 }, - { BadResponse, 9201 }, - { BadResponse, 9202 }, - { BadResponse, 9203 }, - { MaxRetriesReached } - } - ); ----- - -In our previous test we simulated very fast failures, in the real world a call might take upwards of a second -Here we simulate a particular heavy search that takes 10 seconds to fail, our Request timeout is set to 20 seconds. -In this case it does not make sense to retry our 10 second query on 10 nodes. 
We should try it twice and give up before a third call is attempted - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(10))) - .ClientCalls(r => r.OnPort(9209).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(20))) - ); -audit = await audit.TraceCall( - new ClientCall { - { BadResponse, 9200 }, - { BadResponse, 9201 }, - { MaxTimeoutReached } - } - ); ----- - -If you set smaller request time outs you might not want it to also affect the retry timeout, therefor you can configure these separately too. -Here we simulate calls taking 3 seconds, a request time out of 2 and an overall retry timeout of 10 seconds. -We should see 5 attempts to perform this query, testing that our request timeout cuts the query off short and that our max retry timeout of 10 -wins over the configured request timeout - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3))) - .ClientCalls(r => r.OnPort(9209).FailAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(2)).MaxRetryTimeout(TimeSpan.FromSeconds(10))) - ); -audit = await audit.TraceCall( - new ClientCall { - { BadResponse, 9200 }, - { BadResponse, 9201 }, - { BadResponse, 9202 }, - { BadResponse, 9203 }, - { BadResponse, 9204 }, - { MaxTimeoutReached } - } - ); ----- - -If your retry policy expands beyond available nodes we won't retry the same node twice - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(2) - .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3))) - .ClientCalls(r => r.OnPort(9209).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(2)).MaxRetryTimeout(TimeSpan.FromSeconds(10))) - ); -audit = await 
audit.TraceCall( - new ClientCall { - { BadResponse, 9200 }, - { BadResponse, 9201 }, - { MaxRetriesReached } - } - ); ----- - -This makes setting any retry setting on a single node connection pool a NOOP, this is by design! -Connection pooling and connection failover is about trying to fail sanely whilst still utilizing available resources and -not giving up on the fail fast principle. It's *NOT* a mechanism for forcing requests to succeed. - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3))) - .ClientCalls(r => r.OnPort(9209).SucceedAlways()) - .SingleNodeConnection() - .Settings(s => s.DisablePing().MaximumRetries(10)) - ); -audit = await audit.TraceCall( - new ClientCall { - { BadResponse, 9200 } - } - ); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/FirstUsage.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/FirstUsage.doc.asciidoc deleted file mode 100644 index 6648607066a..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/FirstUsage.doc.asciidoc +++ /dev/null @@ -1,128 +0,0 @@ -== Pinging - -Pinging is enabled by default for the Static & Sniffing connection pool. -This means that the first time a node is used or resurrected we issue a ping with a smaller (configurable) timeout. 
-This allows us to fail and fallover to a healthy node faster - - -A cluster with 2 nodes where the second node fails on ping - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(2) - .Ping(p => p.Succeeds(Always)) - .Ping(p => p.OnPort(9201).FailAlways()) - .StaticConnectionPool() - .AllDefaults() -); ----- -[source, csharp] ----- -await audit.TraceCalls( ----- -The first call goes to 9200 which succeeds - -[source, csharp] ----- -new ClientCall { - { PingSuccess, 9200}, - { HealthyResponse, 9200}, - { pool => - { - pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0); - } } - }, ----- -The 2nd call does a ping on 9201 because its used for the first time. -It fails so we wrap over to node 9200 which we've already pinged - -[source, csharp] ----- -new ClientCall { - { PingFailure, 9201}, - { HealthyResponse, 9200}, ----- -Finally we assert that the connectionpool has one node that is marked as dead - -[source, csharp] ----- -{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) } - } -); ----- -A cluster with 4 nodes where the second and third pings fail - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(4) - .Ping(p => p.SucceedAlways()) - .Ping(p => p.OnPort(9201).FailAlways()) - .Ping(p => p.OnPort(9202).FailAlways()) - .StaticConnectionPool() - .AllDefaults() -); ----- -[source, csharp] ----- -await audit.TraceCalls( ----- -The first call goes to 9200 which succeeds - -[source, csharp] ----- -new ClientCall { - { PingSuccess, 9200}, - { HealthyResponse, 9200}, - { pool => - { - pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0); - } } - }, ----- -The 2nd call does a ping on 9201 because its used for the first time. -It fails and so we ping 9202 which also fails. 
We then ping 9203 becuase -we haven't used it before and it succeeds - -[source, csharp] ----- -new ClientCall { - { PingFailure, 9201}, - { PingFailure, 9202}, - { PingSuccess, 9203}, - { HealthyResponse, 9203}, ----- -Finally we assert that the connectionpool has two nodes that are marked as dead - -[source, csharp] ----- -{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } - } -); ----- -A healthy cluster of 4 (min master nodes of 3 of course!) - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(4) - .Ping(p => p.SucceedAlways()) - .StaticConnectionPool() - .AllDefaults() -); ----- -[source, csharp] ----- -await audit.TraceCalls( - new ClientCall { { PingSuccess, 9200}, { HealthyResponse, 9200} }, - new ClientCall { { PingSuccess, 9201}, { HealthyResponse, 9201} }, - new ClientCall { { PingSuccess, 9202}, { HealthyResponse, 9202} }, - new ClientCall { { PingSuccess, 9203}, { HealthyResponse, 9203} }, - new ClientCall { { HealthyResponse, 9200} }, - new ClientCall { { HealthyResponse, 9201} }, - new ClientCall { { HealthyResponse, 9202} }, - new ClientCall { { HealthyResponse, 9203} }, - new ClientCall { { HealthyResponse, 9200} } - ); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/Revival.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/Revival.doc.asciidoc deleted file mode 100644 index 89c4e99284a..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Pinging/Revival.doc.asciidoc +++ /dev/null @@ -1,48 +0,0 @@ -== Pinging - -When a node is marked dead it will only be put in the dog house for a certain amount of time. Once it comes out of the dog house, or revived, we schedule a ping -before the actual call to make sure its up and running. If its still down we put it back in the dog house a little longer. 
For an explanation on these timeouts see: TODO LINK - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(3) - .ClientCalls(r => r.SucceedAlways()) - .ClientCalls(r => r.OnPort(9202).Fails(Once)) - .Ping(p => p.SucceedAlways()) - .StaticConnectionPool() - .AllDefaults() - ); -audit = await audit.TraceCalls( - new ClientCall { { PingSuccess, 9200 }, { HealthyResponse, 9200 } }, - new ClientCall { { PingSuccess, 9201 }, { HealthyResponse, 9201 } }, - new ClientCall { - { PingSuccess, 9202}, - { BadResponse, 9202}, - { HealthyResponse, 9200}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) } - }, - new ClientCall { { HealthyResponse, 9201 } }, - new ClientCall { { HealthyResponse, 9200 } }, - new ClientCall { { HealthyResponse, 9201 } }, - new ClientCall { - { HealthyResponse, 9200 }, - { pool => pool.Nodes.First(n=>!n.IsAlive).DeadUntil.Should().BeAfter(DateTime.UtcNow) } - } - ); -audit = await audit.TraceCalls( - new ClientCall { { HealthyResponse, 9201 } }, - new ClientCall { { HealthyResponse, 9200 } }, - new ClientCall { { HealthyResponse, 9201 } } - ); -audit.ChangeTime(d => d.AddMinutes(20)); -audit = await audit.TraceCalls( - new ClientCall { { HealthyResponse, 9201 } }, - new ClientCall { - { Resurrection, 9202 }, - { PingSuccess, 9202 }, - { HealthyResponse, 9202 } - } - ); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/RequestOverrides/RespectsMaxRetryOverrides.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/RequestOverrides/RespectsMaxRetryOverrides.doc.asciidoc deleted file mode 100644 index ce2f5afac55..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/RequestOverrides/RespectsMaxRetryOverrides.doc.asciidoc +++ /dev/null @@ -1,68 +0,0 @@ -== MaxRetries -By default retry as many times as we have nodes. However retries still respect the request timeout. 
-Meaning if you have a 100 node cluster and a request timeout of 20 seconds we will retry as many times as we can -but give up after 20 seconds - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways()) - .ClientCalls(r => r.OnPort(9209).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing()) - ); -audit = await audit.TraceCall( - new ClientCall(r => r.MaxRetries(2)) { - { BadResponse, 9200 }, - { BadResponse, 9201 }, - { BadResponse, 9202 }, - { MaxRetriesReached } - } - ); ----- - -When you have a 100 node cluster you might want to ensure a fixed number of retries. -Remember that the actual number of requests is initial attempt + set number of retries - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways()) - .ClientCalls(r => r.OnPort(9209).SucceedAlways()) - .StaticConnectionPool() - .Settings(s => s.DisablePing().MaximumRetries(5)) - ); -audit = await audit.TraceCall( - new ClientCall(r => r.MaxRetries(2)) { - { BadResponse, 9200 }, - { BadResponse, 9201 }, - { BadResponse, 9202 }, - { MaxRetriesReached } - } - ); ----- - -This makes setting any retry setting on a single node connection pool a NOOP, this is by design! -Connection pooling and connection failover is about trying to fail sanely whilst still utilizing available resources and -not giving up on the fail fast principle. It's *NOT* a mechanism for forcing requests to succeed. 
- - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3))) - .ClientCalls(r => r.OnPort(9209).SucceedAlways()) - .SingleNodeConnection() - .Settings(s => s.DisablePing().MaximumRetries(10)) - ); -audit = await audit.TraceCall( - new ClientCall(r => r.MaxRetries(10)) { - { BadResponse, 9200 } - } - ); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnConnectionFailure.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnConnectionFailure.doc.asciidoc deleted file mode 100644 index ef2b68cd4cc..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnConnectionFailure.doc.asciidoc +++ /dev/null @@ -1,175 +0,0 @@ -== Sniffing on connection failure -Sniffing on connection is enabled by default when using a connection pool that allows reseeding. -The only IConnectionPool we ship that allows this is the SniffingConnectionPool. - -This can be very handy to force a refresh of the pools known healthy node by inspecting elasticsearch itself. -A sniff tries to get the nodes by asking each currently known node until one response. - - -Here we seed our connection with 5 known nodes 9200-9204 of which we think -9202, 9203, 9204 are master eligible nodes. Our virtualized cluster will throw once when doing -a search on 9201. This should a sniff to be kicked off. 
- -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(5) - .MasterEligible(9202, 9203, 9204) - .ClientCalls(r => r.SucceedAlways()) - .ClientCalls(r => r.OnPort(9201).Fails(Once)) ----- -When the call fails on 9201 the sniff succeeds and returns a new cluster of healty nodes -this cluster only has 3 nodes and the known masters are 9200 and 9202 but a search on 9201 -still fails once - -[source, csharp] ----- -.Sniff(p => p.SucceedAlways(Framework.Cluster - .Nodes(3) - .MasterEligible(9200, 9202) - .ClientCalls(r => r.OnPort(9201).Fails(Once)) ----- -After this second failure on 9201 another sniff will be returned a cluster that no -longer fails but looks completely different (9210-9212) we should be able to handle this - -[source, csharp] ----- -.Sniff(s => s.SucceedAlways(Framework.Cluster - .Nodes(3, 9210) - .MasterEligible(9210, 9212) - .ClientCalls(r => r.SucceedAlways()) - .Sniff(r => r.SucceedAlways()) - )) - )) - .SniffingConnectionPool() - .Settings(s => s.DisablePing().SniffOnStartup(false)) -); ----- -[source, csharp] ----- -audit = await audit.TraceCalls( ----- - - -[source, csharp] ----- -new ClientCall { - { HealthyResponse, 9200 }, - { pool => pool.Nodes.Count.Should().Be(5) } - }, - new ClientCall { - { BadResponse, 9201}, ----- -We assert we do a sniff on our first known master node 9202 - -[source, csharp] ----- -{ SniffOnFail }, - { SniffSuccess, 9202}, - { HealthyResponse, 9200}, ----- -Our pool should now have three nodes - -[source, csharp] ----- -{ pool => pool.Nodes.Count.Should().Be(3) } - }, - new ClientCall { - { BadResponse, 9201}, ----- -We assert we do a sniff on the first master node in our updated cluster - -[source, csharp] ----- -{ SniffOnFail }, - { SniffSuccess, 9200}, - { HealthyResponse, 9210}, - { pool => pool.Nodes.Count.Should().Be(3) } - }, - new ClientCall { { HealthyResponse, 9211 } }, - new ClientCall { { HealthyResponse, 9212 } }, - new ClientCall { { HealthyResponse, 9210 } }, - new 
ClientCall { { HealthyResponse, 9211 } }, - new ClientCall { { HealthyResponse, 9212 } }, - new ClientCall { { HealthyResponse, 9210 } }, - new ClientCall { { HealthyResponse, 9211 } }, - new ClientCall { { HealthyResponse, 9212 } }, - new ClientCall { { HealthyResponse, 9210 } } -); ----- -Here we set up our cluster exactly the same as the previous setup -Only we enable pinging (default is true) and make the ping fail - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(5) - .MasterEligible(9202, 9203, 9204) - .Ping(r => r.OnPort(9201).Fails(Once)) - .Sniff(p => p.SucceedAlways(Framework.Cluster - .Nodes(3) - .MasterEligible(9200, 9202) - .Ping(r => r.OnPort(9201).Fails(Once)) - .Sniff(s => s.SucceedAlways(Framework.Cluster - .Nodes(3, 9210) - .MasterEligible(9210, 9211) - .Ping(r => r.SucceedAlways()) - .Sniff(r => r.SucceedAlways()) - )) - )) - .SniffingConnectionPool() - .Settings(s => s.SniffOnStartup(false)) -); ----- -[source, csharp] ----- -audit = await audit.TraceCalls( - new ClientCall { - { PingSuccess, 9200 }, - { HealthyResponse, 9200 }, - { pool => pool.Nodes.Count.Should().Be(5) } - }, - new ClientCall { - { PingFailure, 9201}, ----- -We assert we do a sniff on our first known master node 9202 - -[source, csharp] ----- -{ SniffOnFail }, - { SniffSuccess, 9202}, - { PingSuccess, 9200}, - { HealthyResponse, 9200}, ----- -Our pool should now have three nodes - -[source, csharp] ----- -{ pool => pool.Nodes.Count.Should().Be(3) } - }, - new ClientCall { - { PingFailure, 9201}, ----- -We assert we do a sniff on the first master node in our updated cluster - -[source, csharp] ----- -{ SniffOnFail }, - { SniffSuccess, 9200}, - { PingSuccess, 9210}, - { HealthyResponse, 9210}, - { pool => pool.Nodes.Count.Should().Be(3) } - }, - new ClientCall { { PingSuccess, 9211 }, { HealthyResponse, 9211 } }, - new ClientCall { { PingSuccess, 9212 }, { HealthyResponse, 9212 } }, ----- -9210 was already pinged after the sniff returned the 
new nodes - -[source, csharp] ----- -new ClientCall { { HealthyResponse, 9210 } }, - new ClientCall { { HealthyResponse, 9211 } }, - new ClientCall { { HealthyResponse, 9212 } }, - new ClientCall { { HealthyResponse, 9210 } } -); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStaleClusterState.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStaleClusterState.doc.asciidoc deleted file mode 100644 index d20b01abde4..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStaleClusterState.doc.asciidoc +++ /dev/null @@ -1,97 +0,0 @@ -== Sniffing periodically - -Connection pools that return true for `SupportsReseeding` can be configured to sniff periodically. -In addition to sniffing on startup and sniffing on failures, sniffing periodically can benefit scenerio's where -clusters are often scaled horizontally during peak hours. An application might have a healthy view of a subset of the nodes -but without sniffing periodically it will never find the nodes that have been added to help out with load - - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .MasterEligible(9202, 9203, 9204) - .ClientCalls(r => r.SucceedAlways()) - .Sniff(s => s.SucceedAlways(Framework.Cluster - .Nodes(100) - .MasterEligible(9202, 9203, 9204) - .ClientCalls(r => r.SucceedAlways()) - .Sniff(ss => ss.SucceedAlways(Framework.Cluster - .Nodes(10) - .MasterEligible(9202, 9203, 9204) - .ClientCalls(r => r.SucceedAlways()) - )) - )) - .SniffingConnectionPool() - .Settings(s => s - .DisablePing() - .SniffOnConnectionFault(false) - .SniffOnStartup(false) - .SniffLifeSpan(TimeSpan.FromMinutes(30)) - ) - ); ----- -healty cluster all nodes return healthy responses - -[source, csharp] ----- -audit = await audit.TraceCalls( - new ClientCall { { HealthyResponse, 9200 } }, - new ClientCall { { HealthyResponse, 9201 } }, - new ClientCall { { HealthyResponse, 9202 } }, - new ClientCall { { 
HealthyResponse, 9203 } }, - new ClientCall { { HealthyResponse, 9204 } }, - new ClientCall { { HealthyResponse, 9205 } }, - new ClientCall { { HealthyResponse, 9206 } }, - new ClientCall { { HealthyResponse, 9207 } }, - new ClientCall { { HealthyResponse, 9208 } }, - new ClientCall { { HealthyResponse, 9209 } }, - new ClientCall { - { HealthyResponse, 9200 }, - { pool => pool.Nodes.Count.Should().Be(10) } - } -); ----- -Now let's forward the clock 31 minutes, our sniff lifespan should now go state -and the first call should do a sniff which discovered we scaled up to a 100 nodes! - -[source, csharp] ----- -audit.ChangeTime(d => d.AddMinutes(31)); ----- -[source, csharp] ----- -audit = await audit.TraceCalls( - new ClientCall { ----- -a sniff is done first and it prefers the first node master node - -[source, csharp] ----- -{ SniffOnStaleCluster }, - { SniffSuccess, 9202 }, - { HealthyResponse, 9201 }, - { pool => pool.Nodes.Count.Should().Be(100) } - } -); ----- -[source, csharp] ----- -audit.ChangeTime(d => d.AddMinutes(31)); ----- -[source, csharp] ----- -audit = await audit.TraceCalls( - new ClientCall { ----- -a sniff is done first and it prefers the first node master node - -[source, csharp] ----- -{ SniffOnStaleCluster }, - { SniffSuccess, 9202 }, - { HealthyResponse, 9200 }, - { pool => pool.Nodes.Count.Should().Be(10) } - } -); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStartup.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStartup.doc.asciidoc deleted file mode 100644 index 1f27d68c313..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/OnStartup.doc.asciidoc +++ /dev/null @@ -1,119 +0,0 @@ -== Sniffing on startup - -Connection pools that return true for `SupportsReseeding` by default sniff on startup. 
- - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .Sniff(s => s.Fails(Always)) - .Sniff(s => s.OnPort(9202).Succeeds(Always)) - .SniffingConnectionPool() - .AllDefaults() - ); -await audit.TraceCall(new ClientCall - { - { SniffOnStartup}, - { SniffFailure, 9200}, - { SniffFailure, 9201}, - { SniffSuccess, 9202}, - { PingSuccess , 9200}, - { HealthyResponse, 9200} - }); -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .Sniff(s => s.Fails(Always)) - .Sniff(s => s.OnPort(9202).Succeeds(Always)) - .SniffingConnectionPool() - .AllDefaults() - ); -await audit.TraceCalls( - new ClientCall - { - { SniffOnStartup}, - { SniffFailure, 9200}, - { SniffFailure, 9201}, - { SniffSuccess, 9202}, - { PingSuccess , 9200}, - { HealthyResponse, 9200} - }, - new ClientCall - { - { PingSuccess, 9201}, - { HealthyResponse, 9201} - } - ); -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .Sniff(s => s.Fails(Always)) - .Sniff(s => s.OnPort(9202).Succeeds(Always, Framework.Cluster.Nodes(8, startFrom: 9204))) - .SniffingConnectionPool() - .AllDefaults() - ); -await audit.TraceCall(new ClientCall { - { SniffOnStartup}, - { SniffFailure, 9200}, - { SniffFailure, 9201}, - { SniffSuccess, 9202}, - { PingSuccess, 9204}, - { HealthyResponse, 9204} - }); -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .Sniff(s => s.Fails(Always)) - .Sniff(s => s.OnPort(9209).Succeeds(Always)) - .SniffingConnectionPool() - .AllDefaults() - ); -await audit.TraceCall(new ClientCall { - { SniffOnStartup}, - { SniffFailure, 9200}, - { SniffFailure, 9201}, - { SniffFailure, 9202}, - { SniffFailure, 9203}, - { SniffFailure, 9204}, - { SniffFailure, 9205}, - { SniffFailure, 9206}, - { SniffFailure, 9207}, - { SniffFailure, 9208}, - { SniffSuccess, 9209}, - { PingSuccess, 9200}, - { HealthyResponse, 9200} - }); -var audit = new Auditor(() => Framework.Cluster - .Nodes(new[] { - new Node(new Uri("http://localhost:9200")) { MasterEligible = 
false }, - new Node(new Uri("http://localhost:9201")) { MasterEligible = false }, - new Node(new Uri("http://localhost:9202")) { MasterEligible = true }, - }) - .Sniff(s => s.Succeeds(Always)) - .SniffingConnectionPool() - .AllDefaults() - ); -await audit.TraceCall(new ClientCall { - { SniffOnStartup}, - { SniffSuccess, 9202}, - { PingSuccess, 9200}, - { HealthyResponse, 9200} - }); -var audit = new Auditor(() => Framework.Cluster - .Nodes(new[] { - new Node(new Uri("http://localhost:9200")) { MasterEligible = true }, - new Node(new Uri("http://localhost:9201")) { MasterEligible = true }, - new Node(new Uri("http://localhost:9202")) { MasterEligible = false }, - }) - .Sniff(s => s.Fails(Always)) - .Sniff(s => s.OnPort(9202).Succeeds(Always)) - .SniffingConnectionPool() - .AllDefaults() - ); -await audit.TraceCall(new ClientCall { - { SniffOnStartup}, - { SniffFailure, 9200}, - { SniffFailure, 9201}, - { SniffSuccess, 9202}, - { PingSuccess, 9200}, - { HealthyResponse, 9200} - }); ----- diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/RoleDetection.doc.asciidoc b/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/RoleDetection.doc.asciidoc deleted file mode 100644 index 576d9500d74..00000000000 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/Sniffing/RoleDetection.doc.asciidoc +++ /dev/null @@ -1,121 +0,0 @@ -== Sniffing role detection - -When we sniff the custer state we detect the role of the node whether its master eligible and holds data -We use this information when selecting a node to perform an API call on. 
- - -[source, csharp] ----- -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .Sniff(s => s.Fails(Always)) - .Sniff(s => s.OnPort(9202) - .Succeeds(Always, Framework.Cluster.Nodes(8).MasterEligible(9200, 9201, 9202)) - ) - .SniffingConnectionPool() - .AllDefaults() - ) - { - AssertPoolBeforeCall = (pool) => - { - pool.Should().NotBeNull(); - pool.Nodes.Should().HaveCount(10); - pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(10); - }, - AssertPoolAfterCall = (pool) => - { - pool.Should().NotBeNull(); - pool.Nodes.Should().HaveCount(8); - pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(3); - } - }; -pool.Should().NotBeNull(); -pool.Nodes.Should().HaveCount(10); -pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(10); -pool.Should().NotBeNull(); -pool.Nodes.Should().HaveCount(8); -pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(3); -await audit.TraceStartup(); -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .Sniff(s => s.Fails(Always)) - .Sniff(s => s.OnPort(9202) - .Succeeds(Always, Framework.Cluster.Nodes(8).StoresNoData(9200, 9201, 9202)) - ) - .SniffingConnectionPool() - .AllDefaults() - ) - { - AssertPoolBeforeCall = (pool) => - { - pool.Should().NotBeNull(); - pool.Nodes.Should().HaveCount(10); - pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10); - }, - - AssertPoolAfterCall = (pool) => - { - pool.Should().NotBeNull(); - pool.Nodes.Should().HaveCount(8); - pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5); - } - }; -pool.Should().NotBeNull(); -pool.Nodes.Should().HaveCount(10); -pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10); -pool.Should().NotBeNull(); -pool.Nodes.Should().HaveCount(8); -pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5); -await audit.TraceStartup(); -var audit = new Auditor(() => Framework.Cluster - .Nodes(10) - .Sniff(s => s.SucceedAlways() - .Succeeds(Always, Framework.Cluster.Nodes(8).StoresNoData(9200, 9201, 
9202).SniffShouldReturnFqdn()) - ) - .SniffingConnectionPool() - .AllDefaults() - ) - { - AssertPoolBeforeCall = (pool) => - { - pool.Should().NotBeNull(); - pool.Nodes.Should().HaveCount(10); - pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10); - pool.Nodes.Should().OnlyContain(n => n.Uri.Host == "localhost"); - }, - - AssertPoolAfterCall = (pool) => - { - pool.Should().NotBeNull(); - pool.Nodes.Should().HaveCount(8); - pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5); - pool.Nodes.Should().OnlyContain(n => n.Uri.Host.StartsWith("fqdn") && !n.Uri.Host.Contains("/")); - } - }; -pool.Should().NotBeNull(); -pool.Nodes.Should().HaveCount(10); -pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10); -pool.Nodes.Should().OnlyContain(n => n.Uri.Host == "localhost"); -pool.Should().NotBeNull(); -pool.Nodes.Should().HaveCount(8); -pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5); -pool.Nodes.Should().OnlyContain(n => n.Uri.Host.StartsWith("fqdn") && !n.Uri.Host.Contains("/")); -await audit.TraceStartup(); -var node = SniffAndReturnNode(); -node.MasterEligible.Should().BeTrue(); -node.HoldsData.Should().BeFalse(); -node = await SniffAndReturnNodeAsync(); -node.MasterEligible.Should().BeTrue(); -node.HoldsData.Should().BeFalse(); -var pipeline = CreatePipeline(); -pipeline.Sniff(); -var pipeline = CreatePipeline(); -await pipeline.SniffAsync(); -this._settings = - this._cluster.Client(u => new SniffingConnectionPool(new[] {u}), c => c.PrettyJson()).ConnectionSettings; -var pipeline = new RequestPipeline(this._settings, DateTimeProvider.Default, new MemoryStreamFactory(), - new SearchRequestParameters()); -var nodes = this._settings.ConnectionPool.Nodes; -nodes.Should().NotBeEmpty().And.HaveCount(1); -var node = nodes.First(); ----- diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/DocumentPaths/DocumentPaths.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/DocumentPaths/DocumentPaths.doc.asciidoc deleted file mode 
100644 index 22a2862b8ef..00000000000 --- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/DocumentPaths/DocumentPaths.doc.asciidoc +++ /dev/null @@ -1,104 +0,0 @@ -# DocumentPaths -Many API's in elasticsearch describe a path to a document. In NEST besides generating a constructor that takes -and Index, Type and Id seperately we also generate a constructor taking a DocumentPath that allows you to describe the path -to your document more succintly - -Manually newing - -here we create a new document path based on Project with the id 1 - -[source, csharp] ----- -IDocumentPath path = new DocumentPath(1); ----- -[source, csharp] ----- -Expect("project").WhenSerializing(path.Index); -Expect("project").WhenSerializing(path.Type); -Expect(1).WhenSerializing(path.Id); ----- -You can still override the inferred index and type name - -[source, csharp] ----- -path = new DocumentPath(1).Type("project1"); ----- -[source, csharp] ----- -Expect("project1").WhenSerializing(path.Type); -path = new DocumentPath(1).Index("project1"); -Expect("project1").WhenSerializing(path.Index); ----- -there is also a static way to describe such paths - -[source, csharp] ----- -path = DocumentPath.Id(1); ----- -[source, csharp] ----- -Expect("project").WhenSerializing(path.Index); -Expect("project").WhenSerializing(path.Type); -Expect(1).WhenSerializing(path.Id); -var project = new Project { Name = "hello-world" }; ----- -here we create a new document path based on a Project - -[source, csharp] ----- -IDocumentPath path = new DocumentPath(project); ----- -[source, csharp] ----- -Expect("project").WhenSerializing(path.Index); -Expect("project").WhenSerializing(path.Type); -Expect("hello-world").WhenSerializing(path.Id); ----- -You can still override the inferred index and type name - -[source, csharp] ----- -path = new DocumentPath(project).Type("project1"); ----- -[source, csharp] ----- -Expect("project1").WhenSerializing(path.Type); -path = new DocumentPath(project).Index("project1"); 
-Expect("project1").WhenSerializing(path.Index); ----- -there is also a static way to describe such paths - -[source, csharp] ----- -path = DocumentPath.Id(project); ----- -[source, csharp] ----- -Expect("project").WhenSerializing(path.Index); -Expect("project").WhenSerializing(path.Type); -Expect("hello-world").WhenSerializing(path.Id); -DocumentPath p = project; -var project = new Project { Name = "hello-world" }; ----- -Here we can see and example how DocumentPath helps your describe your requests more tersely - -[source, csharp] ----- -var request = new IndexRequest(2) { Document = project }; ----- -[source, csharp] ----- -request = new IndexRequest(project) { }; ----- -when comparing with the full blown constructor and passing document manually -DocumentPath -T -'s benefits become apparent. - -[source, csharp] ----- -request = new IndexRequest(IndexName.From(), TypeName.From(), 2) -{ - Document = project -}; ----- diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldInference.doc.asciidoc deleted file mode 100644 index 8701d88bf1d..00000000000 --- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldInference.doc.asciidoc +++ /dev/null @@ -1,429 +0,0 @@ -# Strongly typed field access - -Several places in the elasticsearch API expect the path to a field from your original source document as a string. -NEST allows you to use C# expressions to strongly type these field path strings. 
- -These expressions are assigned to a type called `Field` and there are several ways to create a instance of that type - - -Using the constructor directly is possible but rather involved - -[source, csharp] ----- -var fieldString = new Field { Name = "name" }; ----- -especially when using C# expressions since these can not be simply new'ed - -[source, csharp] ----- -Expression> expression = p => p.Name; ----- -[source, csharp] ----- -var fieldExpression = Field.Create(expression); -Expect("name") - .WhenSerializing(fieldExpression) - .WhenSerializing(fieldString); ----- -Therefore you can also implicitly convert strings and expressions to Field's - -[source, csharp] ----- -Field fieldString = "name"; ----- -but for expressions this is still rather involved - -[source, csharp] ----- -Expression> expression = p => p.Name; ----- -[source, csharp] ----- -Field fieldExpression = expression; -Expect("name") - .WhenSerializing(fieldExpression) - .WhenSerializing(fieldString); ----- -to ease creating Field's from expressions there is a static Property class you can use - -[source, csharp] ----- -Field fieldString = "name"; ----- -but for expressions this is still rather involved - -[source, csharp] ----- -var fieldExpression = Infer.Field(p => p.Name); ----- -Using static imports in c# 6 this can be even shortened: -using static Nest.Static; - -[source, csharp] ----- -fieldExpression = Field(p => p.Name); ----- -Now this is much much terser then our first example using the constructor! 
- -[source, csharp] ----- -Expect("name") - .WhenSerializing(fieldString) - .WhenSerializing(fieldExpression); ----- -By default NEST will camelCase all the field names to be more javascripty - -using DefaultFieldNameInferrer() on ConnectionSettings you can change this behavior - -[source, csharp] ----- -var setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p.ToUpper())); ----- -[source, csharp] ----- -setup.Expect("NAME").WhenSerializing(Field(p => p.Name)); ----- -However string are *always* passed along verbatim - -[source, csharp] ----- -setup.Expect("NaMe").WhenSerializing("NaMe"); ----- -if you want the same behavior for expressions simply do nothing in the default inferrer - -[source, csharp] ----- -setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p)); ----- -[source, csharp] ----- -setup.Expect("Name").WhenSerializing(Field(p => p.Name)); ----- -Complex field name expressions - -You can follow your property expression to any depth, here we are traversing to the LeadDeveloper's (Person) FirstName - -[source, csharp] ----- -Expect("leadDeveloper.firstName").WhenSerializing(Field(p => p.LeadDeveloper.FirstName)); ----- -When dealing with collection index access is ingnored allowing you to traverse into properties of collections - -[source, csharp] ----- -Expect("curatedTags").WhenSerializing(Field(p => p.CuratedTags[0])); ----- -Similarly .First() also works, remember these are expressions and not actual code that will be executed - -[source, csharp] ----- -Expect("curatedTags").WhenSerializing(Field(p => p.CuratedTags.First())); ----- -[source, csharp] ----- -Expect("curatedTags.added").WhenSerializing(Field(p => p.CuratedTags[0].Added)); -Expect("curatedTags.name").WhenSerializing(Field(p => p.CuratedTags.First().Name)); ----- -When we see an indexer on a dictionary we assume they describe property names - -[source, csharp] ----- -Expect("metadata.hardcoded").WhenSerializing(Field(p => p.Metadata["hardcoded"])); ----- 
-[source, csharp] ----- -Expect("metadata.hardcoded.created").WhenSerializing(Field(p => p.Metadata["hardcoded"].Created)); ----- -A cool feature here is that we'll evaluate variables passed to these indexers - -[source, csharp] ----- -var variable = "var"; ----- -[source, csharp] ----- -Expect("metadata.var").WhenSerializing(Field(p => p.Metadata[variable])); -Expect("metadata.var.created").WhenSerializing(Field(p => p.Metadata[variable].Created)); ----- -If you are using elasticearch's multifield mapping (you really should!) these "virtual" sub fields -do not always map back on to your POCO, by calling .Suffix() you describe the sub fields that do not live in your c# objects - -[source, csharp] ----- -Expect("leadDeveloper.firstName.raw").WhenSerializing(Field(p => p.LeadDeveloper.FirstName.Suffix("raw"))); ----- -[source, csharp] ----- -Expect("curatedTags.raw").WhenSerializing(Field(p => p.CuratedTags[0].Suffix("raw"))); -Expect("curatedTags.raw").WhenSerializing(Field(p => p.CuratedTags.First().Suffix("raw"))); -Expect("curatedTags.added.raw").WhenSerializing(Field(p => p.CuratedTags[0].Added.Suffix("raw"))); -Expect("metadata.hardcoded.raw").WhenSerializing(Field(p => p.Metadata["hardcoded"].Suffix("raw"))); -Expect("metadata.hardcoded.created.raw").WhenSerializing(Field(p => p.Metadata["hardcoded"].Created.Suffix("raw"))); ----- -You can even chain them to any depth! - -[source, csharp] ----- -Expect("curatedTags.name.raw.evendeeper").WhenSerializing(Field(p => p.CuratedTags.First().Name.Suffix("raw").Suffix("evendeeper"))); ----- -Variables passed to suffix will be evaluated as well - -[source, csharp] ----- -var suffix = "unanalyzed"; ----- -[source, csharp] ----- -Expect("metadata.var.unanalyzed").WhenSerializing(Field(p => p.Metadata[variable].Suffix(suffix))); -Expect("metadata.var.created.unanalyzed").WhenSerializing(Field(p => p.Metadata[variable].Created.Suffix(suffix))); ----- - -Suffixes can be appended to expressions. 
This is useful in cases where you want to apply the same suffix -to a list of fields - - - - -[source, csharp] ----- -var expressions = new List>> -{ - p => p.Name, - p => p.Description, - p => p.CuratedTags.First().Name, - p => p.LeadDeveloper.FirstName -}; ----- -append the suffix "raw" to each expression - -[source, csharp] ----- -var fieldExpressions = - expressions.Select>, Field>(e => e.AppendSuffix("raw")).ToList(); ----- -[source, csharp] ----- -Expect("name.raw").WhenSerializing(fieldExpressions[0]); -Expect("description.raw").WhenSerializing(fieldExpressions[1]); -Expect("curatedTags.name.raw").WhenSerializing(fieldExpressions[2]); -Expect("leadDeveloper.firstName.raw").WhenSerializing(fieldExpressions[3]); ----- -Annotations - -When using NEST's property attributes you can specify a new name for the properties - -[source, csharp] ----- -public class BuiltIn -{ - [String(Name = "naam")] - public string Name { get; set; } -} ----- -[source, csharp] ----- -Expect("naam").WhenSerializing(Field(p => p.Name)); ----- - -Starting with NEST 2.x we also ask the serializer if it can resolve the property to a name. 
-Here we ask the default JsonNetSerializer and it takes JsonProperty into account - -[source, csharp] ----- -public class SerializerSpecific -{ - [JsonProperty("nameInJson")] - public string Name { get; set; } -} ----- -[source, csharp] ----- -Expect("nameInJson").WhenSerializing(Field(p => p.Name)); ----- - -If both are specified NEST takes precedence though - -[source, csharp] ----- -public class Both -{ - [String(Name = "naam")] - [JsonProperty("nameInJson")] - public string Name { get; set; } -} ----- -[source, csharp] ----- -Expect("naam").WhenSerializing(Field(p => p.Name)); -Expect(new - { - naam = "Martijn Laarman" - }).WhenSerializing(new Both { Name = "Martijn Laarman" }); ----- -[source, csharp] ----- -class A { public C C { get; set; } } ----- -[source, csharp] ----- -class B { public C C { get; set; } } ----- -[source, csharp] ----- -class C -{ - public string Name { get; set; } -} ----- - -Resolving field names is cached but this is per connection settings - - -[source, csharp] ----- -var connectionSettings = TestClient.CreateSettings(forceInMemory: true); -var client = new ElasticClient(connectionSettings); -var fieldNameOnA = client.Infer.Field(Field(p => p.C.Name)); -var fieldNameOnB = client.Infer.Field(Field(p => p.C.Name)); ----- -Here we have to similary shaped expressions on coming from A and on from B -that will resolve to the same field name, as expected - -[source, csharp] ----- -fieldNameOnA.Should().Be("c.name"); ----- -[source, csharp] ----- -fieldNameOnB.Should().Be("c.name"); ----- -now we create a new connectionsettings with a remap for C on class A to `d` -now when we resolve the field path for A will be different - -[source, csharp] ----- -var newConnectionSettings = TestClient.CreateSettings(forceInMemory: true, modifySettings: s => s - .InferMappingFor(m => m - .Rename(p => p.C, "d") - ) -); ----- -[source, csharp] ----- -var newClient = new ElasticClient(newConnectionSettings); -fieldNameOnA = newClient.Infer.Field(Field(p => 
p.C.Name)); -fieldNameOnB = newClient.Infer.Field(Field(p => p.C.Name)); -fieldNameOnA.Should().Be("d.name"); -fieldNameOnB.Should().Be("c.name"); ----- -however we didn't break inferrence on the first client instance using its separate connectionsettings - -[source, csharp] ----- -fieldNameOnA = client.Infer.Field(Field(p => p.C.Name)); ----- -[source, csharp] ----- -fieldNameOnB = client.Infer.Field(Field(p => p.C.Name)); -fieldNameOnA.Should().Be("c.name"); -fieldNameOnB.Should().Be("c.name"); ----- -To wrap up lets showcase the precedence that field names are inferred -1. A hard rename of the property on connection settings using Rename() -2. A NEST property mapping -3. Ask the serializer if the property has a verbatim value e.g it has an explicit JsonPropery attribute. -4. Pass the MemberInfo's Name to the DefaultFieldNameInferrer which by default camelCases -In the following example we have a class where each case wins - -[source, csharp] ----- -class Precedence -{ ----- -Eventhough this property has a NEST property mapping and a JsonProperty attribute -We are going to provide a hard rename for it on ConnectionSettings later that should win. - -[source, csharp] ----- -[String(Name = "renamedIgnoresNest")] - [JsonProperty("renamedIgnoresJsonProperty")] - public string RenamedOnConnectionSettings { get; set; } ----- -This property has both a NEST attribute and a JsonProperty, NEST should win. 
- -[source, csharp] ----- -[String(Name = "nestAtt")] - [JsonProperty("jsonProp")] - public string NestAttribute { get; set; } ----- -We should take the json property into account by itself - -[source, csharp] ----- -[JsonProperty("jsonProp")] - public string JsonProperty { get; set; } ----- -This property we are going to special case in our custom serializer to resolve to `ask` - -[source, csharp] ----- -[JsonProperty("dontaskme")] - public string AskSerializer { get; set; } ----- -We are going to register a DefaultFieldNameInferrer on ConnectionSettings -that will uppercase all properties. - -[source, csharp] ----- -public string DefaultFieldNameInferrer { get; set; } - -} ----- -[source, csharp] ----- -var usingSettings = WithConnectionSettings(s => s ----- -here we provide an explicit rename of a property on connectionsettings - -[source, csharp] ----- -.InferMappingFor(m => m - .Rename(p => p.RenamedOnConnectionSettings, "renamed") - ) ----- -All properties that are not mapped verbatim should be uppercased - -[source, csharp] ----- -.DefaultFieldNameInferrer(p => p.ToUpperInvariant()) -).WithSerializer(s => new CustomSerializer(s)); ----- -[source, csharp] ----- -usingSettings.Expect("renamed").ForField(Field(p => p.RenamedOnConnectionSettings)); -usingSettings.Expect("nestAtt").ForField(Field(p => p.NestAttribute)); -usingSettings.Expect("jsonProp").ForField(Field(p => p.JsonProperty)); -usingSettings.Expect("ask").ForField(Field(p => p.AskSerializer)); -usingSettings.Expect("DEFAULTFIELDNAMEINFERRER").ForField(Field(p => p.DefaultFieldNameInferrer)); ----- -The same rules apply when indexing an object - -[source, csharp] ----- -usingSettings.Expect(new [] -{ - "ask", - "DEFAULTFIELDNAMEINFERRER", - "jsonProp", - "nestAtt", - "renamed" -}).AsPropertiesOf(new Precedence -{ - RenamedOnConnectionSettings = "renamed on connection settings", - NestAttribute = "using a nest attribute", - JsonProperty = "the default serializer resolves json property attributes", - 
AskSerializer = "serializer fiddled with this one", - DefaultFieldNameInferrer = "shouting much?" -}); ----- diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldNames/FieldInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldNames/FieldInference.doc.asciidoc deleted file mode 100644 index 82b58a192ec..00000000000 --- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/FieldNames/FieldInference.doc.asciidoc +++ /dev/null @@ -1,313 +0,0 @@ -# Strongly typed field access - -Several places in the elasticsearch API expect the path to a field from your original source document as a string. -NEST allows you to use C# expressions to strongly type these field path strings. -These expressions are assigned to a type called `Field` and there are several ways to create a instance of that type - -Using the constructor directly is possible but rather involved - -[source, csharp] ----- -var fieldString = new Field { Name = "name" }; ----- -especially when using C# expressions since these can not be simply new'ed - -[source, csharp] ----- -Expression> expression = p => p.Name; ----- -[source, csharp] ----- -var fieldExpression = Field.Create(expression); -Expect("name") - .WhenSerializing(fieldExpression) - .WhenSerializing(fieldString); ----- -Therefor you can also implicitly convert strings and expressions to Field's - -[source, csharp] ----- -Field fieldString = "name"; ----- -but for expressions this is still rather involved - -[source, csharp] ----- -Expression> expression = p => p.Name; ----- -[source, csharp] ----- -Field fieldExpression = expression; -Expect("name") - .WhenSerializing(fieldExpression) - .WhenSerializing(fieldString); ----- -to ease creating Field's from expressions there is a static Property class you can use - -[source, csharp] ----- -Field fieldString = "name"; ----- -but for expressions this is still rather involved - -[source, csharp] ----- -var fieldExpression = Field(p => p.Name); ----- -Using static imports 
in c# 6 this can be even shortened: -using static Nest.Static; - -[source, csharp] ----- -fieldExpression = Field(p => p.Name); ----- -Now this is much much terser then our first example using the constructor! - -[source, csharp] ----- -Expect("name") - .WhenSerializing(fieldString) - .WhenSerializing(fieldExpression); ----- -By default NEST will camelCase all the field names to be more javascripty - -using DefaultFieldNameInferrer() on ConnectionSettings you can change this behavior - -[source, csharp] ----- -var setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p.ToUpper())); ----- -[source, csharp] ----- -setup.Expect("NAME").WhenSerializing(Field(p => p.Name)); ----- -However string are *always* passed along verbatim - -[source, csharp] ----- -setup.Expect("NaMe").WhenSerializing("NaMe"); ----- -if you want the same behavior for expressions simply do nothing in the default inferrer - -[source, csharp] ----- -setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p)); ----- -[source, csharp] ----- -setup.Expect("Name").WhenSerializing(Field(p => p.Name)); ----- -Complex field name expressions - -You can follow your property expression to any depth, here we are traversing to the LeadDeveloper's (Person) FirstName - -[source, csharp] ----- -Expect("leadDeveloper.firstName").WhenSerializing(Field(p => p.LeadDeveloper.FirstName)); ----- -When dealing with collection index access is ingnored allowing you to traverse into properties of collections - -[source, csharp] ----- -Expect("curatedTags").WhenSerializing(Field(p => p.CuratedTags[0])); ----- -Similarly .First() also works, remember these are expressions and not actual code that will be executed - -[source, csharp] ----- -Expect("curatedTags").WhenSerializing(Field(p => p.CuratedTags.First())); ----- -[source, csharp] ----- -Expect("curatedTags.added").WhenSerializing(Field(p => p.CuratedTags[0].Added)); -Expect("curatedTags.name").WhenSerializing(Field(p => 
p.CuratedTags.First().Name)); ----- -When we see an indexer on a dictionary we assume they describe property names - -[source, csharp] ----- -Expect("metadata.hardcoded").WhenSerializing(Field(p => p.Metadata["hardcoded"])); ----- -[source, csharp] ----- -Expect("metadata.hardcoded.created").WhenSerializing(Field(p => p.Metadata["hardcoded"].Created)); ----- -A cool feature here is that we'll evaluate variables passed to these indexers - -[source, csharp] ----- -var variable = "var"; ----- -[source, csharp] ----- -Expect("metadata.var").WhenSerializing(Field(p => p.Metadata[variable])); -Expect("metadata.var.created").WhenSerializing(Field(p => p.Metadata[variable].Created)); ----- -If you are using elasticearch's multifield mapping (you really should!) these "virtual" sub fields -do not always map back on to your POCO, by calling .Suffix() you describe the sub fields that do not live in your c# objects - -[source, csharp] ----- -Expect("leadDeveloper.firstName.raw").WhenSerializing(Field(p => p.LeadDeveloper.FirstName.Suffix("raw"))); ----- -[source, csharp] ----- -Expect("curatedTags.raw").WhenSerializing(Field(p => p.CuratedTags[0].Suffix("raw"))); -Expect("curatedTags.raw").WhenSerializing(Field(p => p.CuratedTags.First().Suffix("raw"))); -Expect("curatedTags.added.raw").WhenSerializing(Field(p => p.CuratedTags[0].Added.Suffix("raw"))); -Expect("metadata.hardcoded.raw").WhenSerializing(Field(p => p.Metadata["hardcoded"].Suffix("raw"))); -Expect("metadata.hardcoded.created.raw").WhenSerializing(Field(p => p.Metadata["hardcoded"].Created.Suffix("raw"))); ----- -You can even chain them to any depth! 
- -[source, csharp] ----- -Expect("curatedTags.name.raw.evendeeper").WhenSerializing(Field(p => p.CuratedTags.First().Name.Suffix("raw").Suffix("evendeeper"))); ----- -Variables passed to suffix will be evaluated as well - -[source, csharp] ----- -var suffix = "unanalyzed"; ----- -[source, csharp] ----- -Expect("metadata.var.unanalyzed").WhenSerializing(Field(p => p.Metadata[variable].Suffix(suffix))); -Expect("metadata.var.created.unanalyzed").WhenSerializing(Field(p => p.Metadata[variable].Created.Suffix(suffix))); ----- -Annotations - -When using NEST's property attributes you can specify a new name for the properties - -[source, csharp] ----- -Expect("naam").WhenSerializing(Field(p => p.Name)); ----- - -Starting with NEST 2.x we also ask the serializer if it can resolve the property to a name. -Here we ask the default JsonNetSerializer and it takes JsonProperty into account - -[source, csharp] ----- -Expect("nameInJson").WhenSerializing(Field(p => p.Name)); ----- - -If both are specified NEST takes precedence though - -[source, csharp] ----- -Expect("naam").WhenSerializing(Field(p => p.Name)); -Expect(new - { - naam = "Martijn Laarman" - }).WhenSerializing(new Both { Name = "Martijn Laarman" }); ----- -Resolving field names is cached but this is per connection settings - -[source, csharp] ----- -var connectionSettings = TestClient.CreateSettings(forceInMemory: true); -var client = new ElasticClient(connectionSettings); -var fieldNameOnA = client.Infer.Field(Field(p => p.C.Name)); -var fieldNameOnB = client.Infer.Field(Field(p => p.C.Name)); ----- -Here we have to similary shaped expressions on coming from A and on from B -that will resolve to the same field name, as expected - -[source, csharp] ----- -fieldNameOnA.Should().Be("c.name"); ----- -[source, csharp] ----- -fieldNameOnB.Should().Be("c.name"); ----- -now we create a new connectionsettings with a remap for C on class A to `d` -now when we resolve the field path for A will be different - -[source, 
csharp] ----- -var newConnectionSettings = TestClient.CreateSettings(forceInMemory: true, modifySettings: s => s - .InferMappingFor(m => m - .Rename(p => p.C, "d") - ) -); ----- -[source, csharp] ----- -var newClient = new ElasticClient(newConnectionSettings); -fieldNameOnA = newClient.Infer.Field(Field(p => p.C.Name)); -fieldNameOnB = newClient.Infer.Field(Field(p => p.C.Name)); -fieldNameOnA.Should().Be("d.name"); -fieldNameOnB.Should().Be("c.name"); ----- -however we didn't break inferrence on the first client instance using its separate connectionsettings - -[source, csharp] ----- -fieldNameOnA = client.Infer.Field(Field(p => p.C.Name)); ----- -[source, csharp] ----- -fieldNameOnB = client.Infer.Field(Field(p => p.C.Name)); -fieldNameOnA.Should().Be("c.name"); -fieldNameOnB.Should().Be("c.name"); ----- -To wrap up lets showcase the precedence that field names are inferred -1. A hard rename of the property on connection settings using Rename() -2. A NEST property mapping -3. Ask the serializer if the property has a verbatim value e.g it has an explicit JsonPropery attribute. -4. 
Pass the MemberInfo's Name to the DefaultFieldNameInferrer which by default camelCases -In the following example we have a class where each case wins - - -Here we create a custom converter that renames any property named `AskSerializer` to `ask` - -[source, csharp] ----- -var usingSettings = WithConnectionSettings(s => s ----- -here we provide an explicit rename of a property on connectionsettings - -[source, csharp] ----- -.InferMappingFor(m => m - .Rename(p => p.RenamedOnConnectionSettings, "renamed") - ) ----- -All properties that are not mapped verbatim should be uppercased - -[source, csharp] ----- -.DefaultFieldNameInferrer(p => p.ToUpperInvariant()) -).WithSerializer(s => new CustomSerializer(s)); ----- -[source, csharp] ----- -usingSettings.Expect("renamed").ForField(Field(p => p.RenamedOnConnectionSettings)); -usingSettings.Expect("nestAtt").ForField(Field(p => p.NestAttribute)); -usingSettings.Expect("jsonProp").ForField(Field(p => p.JsonProperty)); -usingSettings.Expect("ask").ForField(Field(p => p.AskSerializer)); -usingSettings.Expect("DEFAULTFIELDNAMEINFERRER").ForField(Field(p => p.DefaultFieldNameInferrer)); ----- -The same rules apply when indexing an object - -[source, csharp] ----- -usingSettings.Expect(new [] -{ - "ask", - "DEFAULTFIELDNAMEINFERRER", - "jsonProp", - "nestAtt", - "renamed" -}).AsPropertiesOf(new Precedence -{ - RenamedOnConnectionSettings = "renamed on connection settings", - NestAttribute = "using a nest attribute", - JsonProperty = "the default serializer resolves json property attributes", - AskSerializer = "serializer fiddled with this one", - DefaultFieldNameInferrer = "shouting much?" 
-}); ----- - diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Id/IdsInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Id/IdsInference.doc.asciidoc deleted file mode 100644 index 6a659778cb3..00000000000 --- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Id/IdsInference.doc.asciidoc +++ /dev/null @@ -1,79 +0,0 @@ -# Ids - -Several places in the elasticsearch API expect an Id object to be passed. This is a special box type that you can implicitly convert to and from many value types. - -Methods that take an Id can be passed longs, ints, strings -Guids and they will implicitly converted to Ids - -[source, csharp] ----- -Nest.Id idFromInt = 1; -Nest.Id idFromLong = 2L; -Nest.Id idFromString = "hello-world"; -Nest.Id idFromGuid = new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"); -Expect(1).WhenSerializing(idFromInt); -Expect(2).WhenSerializing(idFromLong); -Expect("hello-world").WhenSerializing(idFromString); -Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenSerializing(idFromGuid); ----- -Sometimes a method takes an object and we need an Id from that object to build up a path. -There is no implicit conversion from any object to Id but we can call Id.From. -Imagine your codebase has the following type that we want to index into elasticsearch - -By default NEST will try to find a property called `Id` on the class using reflection -and create a cached fast func delegate based on the properties getter - -[source, csharp] ----- -var dto = new MyDTO { Id =new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), Name = "x", OtherName = "y" }; ----- -[source, csharp] ----- -Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenInferringIdOn(dto); ----- -Using the connection settings you can specify a different property NEST should look for ids. 
-Here we instruct NEST to infer the Id for MyDTO based on its Name property - -[source, csharp] ----- -WithConnectionSettings(x => x - .InferMappingFor(m => m - .IdProperty(p => p.Name) - ) -).Expect("x").WhenInferringIdOn(dto); ----- -Even though we have a cache at play the cache is per connection settings, so we can create a different config - -[source, csharp] ----- -WithConnectionSettings(x => x - .InferMappingFor(m => m - .IdProperty(p => p.OtherName) - ) -).Expect("y").WhenInferringIdOn(dto); ----- -Another way is to mark the type with an ElasticType attribute, using a string IdProperty - -Now when we infer the id we expect it to be the Name property without doing any configuration on the ConnectionSettings - -[source, csharp] ----- -var dto = new MyOtherDTO { Id =new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), Name = "x", OtherName = "y" }; ----- -[source, csharp] ----- -Expect("x").WhenInferringIdOn(dto); ----- -This attribute IS cached statically/globally, however connectionsettings with a config for the type will -still win over this static configuration - -[source, csharp] ----- -WithConnectionSettings(x => x - .InferMappingFor(m => m - .IdProperty(p => p.OtherName) - ) -).Expect("y").WhenInferringIdOn(dto); ----- -Eventhough we have a cache at play the cache its per connection settings, so we can create a different config - diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IdsInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IdsInference.doc.asciidoc deleted file mode 100644 index b5818a65d59..00000000000 --- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IdsInference.doc.asciidoc +++ /dev/null @@ -1,98 +0,0 @@ -# Ids - -Several places in the elasticsearch API expect an Id object to be passed. This is a special box type that you can implicitly convert to and from many value types. 
- - -Methods that take an Id can be passed longs, ints, strings & Guids and they will implicitly converted to Ids - -[source, csharp] ----- -Id idFromInt = 1; -Id idFromLong = 2L; -Id idFromString = "hello-world"; -Id idFromGuid = new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"); -Expect(1).WhenSerializing(idFromInt); -Expect(2).WhenSerializing(idFromLong); -Expect("hello-world").WhenSerializing(idFromString); -Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenSerializing(idFromGuid); ----- -Sometimes a method takes an object and we need an Id from that object to build up a path. -There is no implicit conversion from any object to Id but we can call Id.From. -Imagine your codebase has the following type that we want to index into elasticsearch - -[source, csharp] ----- -class MyDTO -{ - public Guid Id { get; set; } - public string Name { get; set; } - public string OtherName { get; set; } -} ----- -By default NEST will try to find a property called `Id` on the class using reflection -and create a cached fast func delegate based on the properties getter - -[source, csharp] ----- -var dto = new MyDTO { Id =new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), Name = "x", OtherName = "y" }; ----- -[source, csharp] ----- -Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenInferringIdOn(dto); ----- -Using the connection settings you can specify a different property NEST should look for ids. 
-Here we instruct NEST to infer the Id for MyDTO based on its Name property - -[source, csharp] ----- -WithConnectionSettings(x => x - .InferMappingFor(m => m - .IdProperty(p => p.Name) - ) -).Expect("x").WhenInferringIdOn(dto); ----- -Even though we have a cache at play the cache is per connection settings, so we can create a different config - -[source, csharp] ----- -WithConnectionSettings(x => x - .InferMappingFor(m => m - .IdProperty(p => p.OtherName) - ) -).Expect("y").WhenInferringIdOn(dto); ----- -Another way is to mark the type with an ElasticType attribute, using a string IdProperty - -[source, csharp] ----- -[ElasticsearchType(IdProperty = nameof(Name))] -class MyOtherDTO -{ - public Guid Id { get; set; } - public string Name { get; set; } - public string OtherName { get; set; } -} ----- -Now when we infer the id we expect it to be the Name property without doing any configuration on the ConnectionSettings - -[source, csharp] ----- -var dto = new MyOtherDTO { Id =new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), Name = "x", OtherName = "y" }; ----- -[source, csharp] ----- -Expect("x").WhenInferringIdOn(dto); ----- -This attribute IS cached statically/globally, however connectionsettings with a config for the type will -still win over this static configuration - -[source, csharp] ----- -WithConnectionSettings(x => x - .InferMappingFor(m => m - .IdProperty(p => p.OtherName) - ) -).Expect("y").WhenInferringIdOn(dto); ----- -Eventhough we have a cache at play the cache its per connection settings, so we can create a different config - diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Indices/IndicesPaths.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Indices/IndicesPaths.doc.asciidoc deleted file mode 100644 index cc902f17fde..00000000000 --- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/Indices/IndicesPaths.doc.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -# Indices paths - -Some API's in elasticsearch take one or many index 
name or a special "_all" marker to send the request to all the indices -In nest this is encoded using `Indices` - -Several types implicitly convert to `Indices` - -[source, csharp] ----- -Nest.Indices singleIndexFromString = "name"; -Nest.Indices multipleIndicesFromString = "name1, name2"; -Nest.Indices allFromString = "_all"; -Nest.Indices allWithOthersFromString = "_all, name2"; -singleIndexFromString.Match( - all => all.Should().BeNull(), - many => many.Indices.Should().HaveCount(1).And.Contain("name") - ); ----- -to ease creating Field's from expressions there is a static Property class you can use - - - -[source, csharp] ----- -var all = Nest.Indices.All; ----- -[source, csharp] ----- -var many = Nest.Indices.Index("name1", "name2"); -var manyTyped = Nest.Indices.Index().And(); -var singleTyped = Nest.Indices.Index(); -var singleString = Nest.Indices.Index("name1"); -var invalidSingleString = Nest.Indices.Index("name1, name2"); ----- diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IndicesPaths.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IndicesPaths.doc.asciidoc deleted file mode 100644 index c6c5a78f898..00000000000 --- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/IndicesPaths.doc.asciidoc +++ /dev/null @@ -1,47 +0,0 @@ -# Indices paths - -Some API's in elasticsearch take one or many index name or a special "_all" marker to send the request to all the indices -In nest this is encoded using `Indices` - - -Several types implicitly convert to `Indices` - -[source, csharp] ----- -Nest.Indices singleIndexFromString = "name"; -Nest.Indices multipleIndicesFromString = "name1, name2"; -Nest.Indices allFromString = "_all"; -Nest.Indices allWithOthersFromString = "_all, name2"; -singleIndexFromString.Match( - all => all.Should().BeNull(), - many => many.Indices.Should().HaveCount(1).And.Contain("name") - ); -multipleIndicesFromString.Match( - all => all.Should().BeNull(), - many => 
many.Indices.Should().HaveCount(2).And.Contain("name2") - ); -allFromString.Match( - all => all.Should().NotBeNull(), - many => many.Indices.Should().BeNull() - ); -allWithOthersFromString.Match( - all => all.Should().NotBeNull(), - many => many.Indices.Should().BeNull() - ); ----- -to ease creating Field's from expressions there is a static Property class you can use - - - -[source, csharp] ----- -var all = Nest.Indices.All; ----- -[source, csharp] ----- -var many = Nest.Indices.Index("name1", "name2"); -var manyTyped = Nest.Indices.Index().And(); -var singleTyped = Nest.Indices.Index(); -var singleString = Nest.Indices.Index("name1"); -var invalidSingleString = Nest.Indices.Index("name1, name2"); ----- diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/PropertyInference.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/PropertyInference.doc.asciidoc deleted file mode 100644 index d21a723cd1b..00000000000 --- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/PropertyInference.doc.asciidoc +++ /dev/null @@ -1,6 +0,0 @@ -[source, csharp] ----- -Expression> expression = p => p.Name.Suffix("raw"); -Expect("raw").WhenSerializing(expression); -Assert.Throws(() => Expect("exception!").WhenSerializing("name.raw")); ----- diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Mapping/AutoMap.doc.asciidoc b/docs/asciidoc/ClientConcepts/HighLevel/Mapping/AutoMap.doc.asciidoc deleted file mode 100644 index 9db88699059..00000000000 --- a/docs/asciidoc/ClientConcepts/HighLevel/Mapping/AutoMap.doc.asciidoc +++ /dev/null @@ -1,904 +0,0 @@ -# Auto mapping properties - -When creating a mapping (either when creating an index or via the put mapping API), -NEST offers a feature called AutoMap(), which will automagically infer the correct -Elasticsearch datatypes of the POCO properties you are mapping. Alternatively, if -you're using attributes to map your properties, then calling AutoMap() is required -in order for your attributes to be applied. 
We'll look at examples of both. - - - -For these examples, we'll define two POCOS. A Company, which has a name -and a collection of Employees. And Employee, which has various properties of -different types, and itself has a collection of Employees. - -[source, csharp] ----- -public class Company -{ - public string Name { get; set; } - public List Employees { get; set; } -} ----- -[source, csharp] ----- -public class Employee -{ - public string FirstName { get; set; } - public string LastName { get; set; } - public int Salary { get; set; } - public DateTime Birthday { get; set; } - public bool IsManager { get; set; } - public List Employees { get; set; } - public TimeSpan Hours { get; set;} -} ----- -## Manual mapping -To create a mapping for our Company type, we can use the fluent API -and map each property explicitly - -[source, csharp] ----- -var descriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m - .Properties(ps => ps - .String(s => s - .Name(c => c.Name) - ) - .Object(o => o - .Name(c => c.Employees) - .Properties(eps => eps - .String(s => s - .Name(e => e.FirstName) - ) - .String(s => s - .Name(e => e.LastName) - ) - .Number(n => n - .Name(e => e.Salary) - .Type(NumberType.Integer) - ) - ) - ) - ) - ) - ); ----- -Which is all fine and dandy, and useful for some use cases. However in most cases -this is becomes too cumbersome of an approach, and you simply just want to map *all* -the properties of your POCO in a single go. 
- -[source, csharp] ----- -var expected = new -{ - mappings = new - { - company = new - { - properties = new - { - name = new - { - type = "string" - }, - employees = new - { - type = "object", - properties = new - { - firstName = new - { - type = "string" - }, - lastName = new - { - type = "string" - }, - salary = new - { - type = "integer" - } - } - } - } - } - } -}; ----- -[source, csharp] ----- -Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor); ----- -## Simple Automapping -This is exactly where `AutoMap()` becomes useful. Instead of manually mapping each property, -explicitly, we can instead call `.AutoMap()` for each of our mappings and let NEST do all the work - -[source, csharp] ----- -var descriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m.AutoMap()) - .Map(m => m.AutoMap()) - ); ----- -Observe that NEST has inferred the Elasticsearch types based on the CLR type of our POCO properties. -In this example, -- Birthday was mapped as a date, -- Hours was mapped as a long (ticks) -- IsManager was mapped as a boolean, -- Salary as an integer -- Employees as an object -and the remaining string properties as strings. 
- -[source, csharp] ----- -var expected = new -{ - mappings = new - { - company = new - { - properties = new - { - employees = new - { - properties = new - { - birthday = new - { - type = "date" - }, - employees = new - { - properties = new { }, - type = "object" - }, - firstName = new - { - type = "string" - }, - hours = new - { - type = "long" - }, - isManager = new - { - type = "boolean" - }, - lastName = new - { - type = "string" - }, - salary = new - { - type = "integer" - } - }, - type = "object" - }, - name = new - { - type = "string" - } - } - }, - employee = new - { - properties = new - { - birthday = new - { - type = "date" - }, - employees = new - { - properties = new { }, - type = "object" - }, - firstName = new - { - type = "string" - }, - hours = new - { - type = "long" - }, - isManager = new - { - type = "boolean" - }, - lastName = new - { - type = "string" - }, - salary = new - { - type = "integer" - } - } - } - } -}; ----- -[source, csharp] ----- -Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor); ----- -## Automapping with overrides -In most cases, you'll want to map more than just the vanilla datatypes and also provide -various options on your properties (analyzer, doc_values, etc...). In that case, it's -possible to use AutoMap() in conjuction with explicitly mapped properties. - - -Here we are using AutoMap() to automatically map our company type, but then we're -overriding our employee property and making it a `nested` type, since by default, -AutoMap() will infer objects as `object`. 
- -[source, csharp] ----- -var descriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m - .AutoMap() - .Properties(ps => ps - .Nested(n => n - .Name(c => c.Employees) - .Properties(eps => eps - // snip - ) - ) - ) - ) - ); ----- -[source, csharp] ----- -var expected = new - { - mappings = new - { - company = new - { - properties = new - { - name = new - { - type = "string" - }, - employees = new - { - type = "nested", - properties = new {} - } - } - } - } - }; -Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor); ----- -## Automap with attributes -It is also possible to define your mappings using attributes on your POCOS. When you -use attributes, you MUST use AutoMap() in order for the attributes to be applied. -Here we define the same two types but this time using attributes. - -[source, csharp] ----- -[ElasticsearchType(Name = "company")] -public class CompanyWithAttributes -{ - [String(Analyzer = "keyword", NullValue = "null", Similarity = SimilarityOption.BM25)] - public string Name { get; set; } - - [String] - public TimeSpan? 
HeadOfficeHours { get; set; } - - [Object(Path = "employees", Store = false)] - public List Employees { get; set; } -} ----- -[source, csharp] ----- -[ElasticsearchType(Name = "employee")] -public class EmployeeWithAttributes -{ - [String] - public string FirstName { get; set; } - - [String] - public string LastName { get; set; } - - [Number(DocValues = false, IgnoreMalformed = true, Coerce = true)] - public int Salary { get; set; } - - [Date(Format = "MMddyyyy", NumericResolution = NumericResolutionUnit.Seconds)] - public DateTime Birthday { get; set; } - - [Boolean(NullValue = false, Store = true)] - public bool IsManager { get; set; } - - [Nested(Path = "employees")] - [JsonProperty("empl")] - public List Employees { get; set; } -} ----- -[source, csharp] ----- -var descriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m.AutoMap()) - .Map(m => m.AutoMap()) - ); -var expected = new - { - mappings = new - { - company = new - { - properties = new - { - employees = new - { - path = "employees", - properties = new - { - birthday = new - { - type = "date" - }, - employees = new - { - properties = new { }, - type = "object" - }, - firstName = new - { - type = "string" - }, - hours = new - { - type = "long" - }, - isManager = new - { - type = "boolean" - }, - lastName = new - { - type = "string" - }, - salary = new - { - type = "integer" - } - }, - store = false, - type = "object" - }, - name = new - { - analyzer = "keyword", - null_value = "null", - similarity = "BM25", - type = "string" - }, - headOfficeHours = new - { - type = "string" - } - } - }, - employee = new - { - properties = new - { - birthday = new - { - format = "MMddyyyy", - numeric_resolution = "seconds", - type = "date" - }, - empl = new - { - path = "employees", - properties = new - { - birthday = new - { - type = "date" - }, - employees = new - { - properties = new { }, - type = "object" - }, - firstName = new - { - type = "string" - }, - hours = new - { - type = "long" - 
}, - isManager = new - { - type = "boolean" - }, - lastName = new - { - type = "string" - }, - salary = new - { - type = "integer" - } - }, - type = "nested" - }, - firstName = new - { - type = "string" - }, - isManager = new - { - null_value = false, - store = true, - type = "boolean" - }, - lastName = new - { - type = "string" - }, - salary = new - { - coerce = true, - doc_values = false, - ignore_malformed = true, - type = "double" - } - } - } - } - }; -Expect(expected).WhenSerializing(descriptor as ICreateIndexRequest); ----- - -Just as we were able to override the inferred properties in our earlier example, explicit (manual) -mappings also take precedence over attributes. Therefore we can also override any mappings applied -via any attributes defined on the POCO - - -[source, csharp] ----- -var descriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m - .AutoMap() - .Properties(ps => ps - .Nested(n => n - .Name(c => c.Employees) - ) - ) - ) - .Map(m => m - .AutoMap() - .TtlField(ttl => ttl - .Enable() - .Default("10m") - ) - .Properties(ps => ps - .String(s => s - .Name(e => e.FirstName) - .Fields(fs => fs - .String(ss => ss - .Name("firstNameRaw") - .Index(FieldIndexOption.NotAnalyzed) - ) - .TokenCount(t => t - .Name("length") - .Analyzer("standard") - ) - ) - ) - .Number(n => n - .Name(e => e.Salary) - .Type(NumberType.Double) - .IgnoreMalformed(false) - ) - .Date(d => d - .Name(e => e.Birthday) - .Format("MM-dd-yy") - ) - ) - ) - ); -var expected = new - { - mappings = new - { - company = new - { - properties = new - { - employees = new - { - type = "nested" - }, - name = new - { - analyzer = "keyword", - null_value = "null", - similarity = "BM25", - type = "string" - }, - headOfficeHours = new - { - type = "string" - } - } - }, - employee = new - { - _ttl = new - { - enabled = true, - @default = "10m" - }, - properties = new - { - birthday = new - { - format = "MM-dd-yy", - type = "date" - }, - empl = new - { - path = 
"employees", - properties = new - { - birthday = new - { - type = "date" - }, - employees = new - { - properties = new { }, - type = "object" - }, - firstName = new - { - type = "string" - }, - hours = new - { - type = "long" - }, - isManager = new - { - type = "boolean" - }, - lastName = new - { - type = "string" - }, - salary = new - { - type = "integer" - } - }, - type = "nested" - }, - firstName = new - { - fields = new - { - firstNameRaw = new - { - index = "not_analyzed", - type = "string" - }, - length = new - { - type = "token_count", - analyzer = "standard" - } - }, - type = "string" - }, - isManager = new - { - null_value = false, - store = true, - type = "boolean" - }, - lastName = new - { - type = "string" - }, - salary = new - { - ignore_malformed = false, - type = "double" - } - } - } - } - }; -Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor); ----- -[source, csharp] ----- -[ElasticsearchType(Name = "company")] -public class CompanyWithAttributesAndPropertiesToIgnore -{ - public string Name { get; set; } - - [String(Ignore = true)] - public string PropertyToIgnore { get; set; } - - public string AnotherPropertyToIgnore { get; set; } - - [JsonIgnore] - public string JsonIgnoredProperty { get; set; } -} ----- -== Ignoring Properties -Properties on a POCO can be ignored in a few ways: - - - -- Using the `Ignore` property on a derived `ElasticsearchPropertyAttribute` type applied to the property that should be ignored on the POCO - - - -- Using the `.InferMappingFor(Func, IClrTypeMapping> selector)` on the connection settings - - - -- Using an ignore attribute applied to the POCO property that is understood by the `IElasticsearchSerializer` used and inspected inside of `CreatePropertyMapping()` on the serializer. 
In the case of the default `JsonNetSerializer`, this is the Json.NET `JsonIgnoreAttribute` - - - -This example demonstrates all ways, using the attribute way to ignore the property `PropertyToIgnore`, the infer mapping way to ignore the -property `AnotherPropertyToIgnore` and the json serializer specific attribute way to ignore the property `JsonIgnoredProperty` - - -[source, csharp] ----- -var descriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m - .AutoMap() - ) - ); -var expected = new - { - mappings = new - { - company = new - { - properties = new - { - name = new - { - type = "string" - } - } - } - } - }; -var settings = WithConnectionSettings(s => s - .InferMappingFor(i => i - .Ignore(p => p.AnotherPropertyToIgnore) - ) - ); -settings.Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor); ----- -If you notice in our previous Company/Employee examples, the Employee type is recursive -in that itself contains a collection of type `Employee`. By default, `.AutoMap()` will only -traverse a single depth when it encounters recursive instances like this. Hence, in the -previous examples, the second level of Employee did not get any of its properties mapped. -This is done as a safe-guard to prevent stack overflows and all the fun that comes with -infinite recursion. Additionally, in most cases, when it comes to Elasticsearch mappings, it is -often an edge case to have deeply nested mappings like this. However, you may still have -the need to do this, so you can control the recursion depth of AutoMap(). -Let's introduce a very simple class A, to reduce the noise, which itself has a property -Child of type A. 
- -[source, csharp] ----- -public class A -{ - public A Child { get; set; } -} ----- -By default, AutoMap() only goes as far as depth 1 - -[source, csharp] ----- -var descriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m.AutoMap()) - ); ----- -Thus we do not map properties on the second occurrence of our Child property - -[source, csharp] ----- -var expected = new -{ - mappings = new - { - a = new - { - properties = new - { - child = new - { - properties = new { }, - type = "object" - } - } - } - } -}; ----- -[source, csharp] ----- -Expect(expected).WhenSerializing((ICreateIndexRequest) descriptor); ----- -Now lets specify a maxRecursion of 3 - -[source, csharp] ----- -var withMaxRecursionDescriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m.AutoMap(3)) - ); ----- -AutoMap() has now mapped three levels of our Child property - -[source, csharp] ----- -var expectedWithMaxRecursion = new -{ - mappings = new - { - a = new - { - properties = new - { - child = new - { - type = "object", - properties = new - { - child = new - { - type = "object", - properties = new - { - child = new - { - type = "object", - properties = new - { - child = new - { - type = "object", - properties = new { } - } - } - } - } - } - } - } - } - } - } -}; ----- -[source, csharp] ----- -Expect(expectedWithMaxRecursion).WhenSerializing((ICreateIndexRequest) withMaxRecursionDescriptor); ----- -Now we can pass an instance of our custom visitor to AutoMap() - -[source, csharp] ----- -var descriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m.AutoMap(new DisableDocValuesPropertyVisitor())) - ); ----- -and anytime it maps a property as a number (INumberProperty) or boolean (IBooleanProperty) -it will apply the transformation defined in each Visit() respectively, which in this example -disables doc values. 
- -[source, csharp] ----- -var expected = new -{ - mappings = new - { - employee = new - { - properties = new - { - birthday = new - { - type = "date" - }, - employees = new - { - properties = new { }, - type = "object" - }, - firstName = new - { - type = "string" - }, - isManager = new - { - doc_values = false, - type = "boolean" - }, - lastName = new - { - type = "string" - }, - salary = new - { - doc_values = false, - type = "integer" - } - } - } - } -}; ----- -[source, csharp] ----- -var descriptor = new CreateIndexDescriptor("myindex") - .Mappings(ms => ms - .Map(m => m.AutoMap(new EverythingIsAStringPropertyVisitor())) - ); -var expected = new - { - mappings = new - { - employee = new - { - properties = new - { - birthday = new - { - type = "string" - }, - employees = new - { - type = "string" - }, - firstName = new - { - type = "string" - }, - isManager = new - { - type = "string" - }, - lastName = new - { - type = "string" - }, - salary = new - { - type = "string" - } - } - } - } - }; ----- diff --git a/docs/asciidoc/ClientConcepts/LowLevel/Connecting.doc.asciidoc b/docs/asciidoc/ClientConcepts/LowLevel/Connecting.doc.asciidoc deleted file mode 100644 index 41ac4fd0d1b..00000000000 --- a/docs/asciidoc/ClientConcepts/LowLevel/Connecting.doc.asciidoc +++ /dev/null @@ -1,291 +0,0 @@ -# Connecting -Connecting to *Elasticsearch* with `Elasticsearch.Net` is quite easy but has a few toggles and options worth knowing. - -# Choosing the right connection strategy -If you simply new an `ElasticLowLevelClient`, it will be a non-failover connection to `http://localhost:9200` - - -[source, csharp] ----- -var client = new ElasticLowLevelClient(); -var tokenizers = new TokenizersDescriptor(); ----- - -If your Elasticsearch node does not live at `http://localhost:9200` but i.e `http://mynode.example.com:8082/apiKey`, then -you will need to pass in some instance of `IConnectionConfigurationValues`. 
- -The easiest way to do this is: - - -[source, csharp] ----- -var node = new Uri("http://mynode.example.com:8082/apiKey"); -var config = new ConnectionConfiguration(node); -var client = new ElasticLowLevelClient(config); ----- - -This however is still a non-failover connection. Meaning if that `node` goes down the operation will not be retried on any other nodes in the cluster. - -To get a failover connection we have to pass an `IConnectionPool` instance instead of a `Uri`. - - -[source, csharp] ----- -var node = new Uri("http://mynode.example.com:8082/apiKey"); -var connectionPool = new SniffingConnectionPool(new[] { node }); -var config = new ConnectionConfiguration(connectionPool); -var client = new ElasticLowLevelClient(config); ----- - -Here instead of directly passing `node`, we pass a `SniffingConnectionPool` which will use our `node` to find out the rest of the available cluster nodes. -Be sure to read more about [Connection Pooling and Cluster Failover here](/elasticsearch-net/cluster-failover.html) - -## Options - -Besides either passing a `Uri` or `IConnectionPool` to `ConnectionConfiguration`, you can also fluently control many more options. For instance: - - -[source, csharp] ----- -var node = new Uri("http://mynode.example.com:8082/apiKey"); -var connectionPool = new SniffingConnectionPool(new[] { node }); -var config = new ConnectionConfiguration(connectionPool) - .DisableDirectStreaming() - .BasicAuthentication("user", "pass") - .RequestTimeout(TimeSpan.FromSeconds(5)); ----- - -The following is a list of available connection configuration options: - - -[source, csharp] ----- -var client = new ElasticLowLevelClient(); ----- -[source, csharp] ----- -var config = new ConnectionConfiguration() - - .DisableAutomaticProxyDetection() ----- -Disable automatic proxy detection. Defaults to true. 
- -[source, csharp] ----- -.EnableHttpCompression() ----- -Enable compressed request and reesponses from Elasticsearch (Note that nodes need to be configured -to allow this. See the [http module settings](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-http.html) for more info). - -[source, csharp] ----- -.DisableDirectStreaming() ----- -By default responses are deserialized off stream to the object you tell it to. -For debugging purposes it can be very useful to keep a copy of the raw response on the result object. - -[source, csharp] ----- -var result = client.Search>(new { size = 12 }); -var raw = result.ResponseBodyInBytes; ----- -This will only have a value if the client configuration has ExposeRawResponse set - -[source, csharp] ----- -var stringResult = client.Search(new { }); ----- - -Please note that this only make sense if you need a mapped response and the raw response at the same time. -If you need a `string` or `byte[]` response simply call: - -[source, csharp] ----- -config = config - //endhide - .GlobalQueryStringParameters(new NameValueCollection()) ----- -Allows you to set querystring parameters that have to be added to every request. For instance, if you use a hosted elasticserch provider, and you need need to pass an `apiKey` parameter onto every request. - -[source, csharp] ----- -.Proxy(new Uri("http://myproxy"), "username", "pass") ----- -Sets proxy information on the connection. - -[source, csharp] ----- -.RequestTimeout(TimeSpan.FromSeconds(4)) ----- -Sets the global maximum time a connection may take. -Please note that this is the request timeout, the builtin .NET `WebRequest` has no way to set connection timeouts -(see http://msdn.microsoft.com/en-us/library/system.net.httpwebrequest.timeout(v=vs.110).aspx). - -[source, csharp] ----- -.ThrowExceptions() ----- -As an alternative to the C/go like error checking on `response.IsValid`, you can instead tell the client to throw -exceptions. 
-There are three category of exceptions thay may be thrown: - -1) ElasticsearchClientException: These are known exceptions, either an exception that occurred in the request pipeline -(such as max retries or timeout reached, bad authentication, etc...) or Elasticsearch itself returned an error (could -not parse the request, bad query, missing field, etc...). If it is an Elasticsearch error, the `ServerError` property -on the response will contain the the actual error that was returned. The inner exception will always contain the -root causing exception. - -2) UnexpectedElasticsearchClientException: These are unknown exceptions, for instance a response from Elasticsearch not -properly deserialized. These are usually bugs and should be reported. This excpetion also inherits from ElasticsearchClientException -so an additional catch block isn't necessary, but can be helpful in distinguishing between the two. -3) Development time exceptions: These are CLR exceptions like ArgumentException, NullArgumentException etc... that are thrown -when an API in the client is misused. These should not be handled as you want to know about them during development. - -[source, csharp] ----- -.PrettyJson() ----- -Forces all serialization to be indented and appends `pretty=true` to all the requests so that the responses are indented as well - -[source, csharp] ----- -.BasicAuthentication("username", "password") ----- -Sets the HTTP basic authentication credentials to specify with all requests. - -**Note:** This can alternatively be specified on the node URI directly: - -[source, csharp] ----- -var uri = new Uri("http://username:password@localhost:9200"); ----- -[source, csharp] ----- -var settings = new ConnectionConfiguration(uri); ----- - -...but may become tedious when using connection pooling with multiple nodes. - - - -You can pass a callback of type `Action` that can eaves drop every time a response (good or bad) is created. 
-If you have complex logging needs this is a good place to add that in. - - -[source, csharp] ----- -var counter = 0; -var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200")); -var settings = new ConnectionSettings(connectionPool, new InMemoryConnection()) - .OnRequestCompleted(r => counter++); -var client = new ElasticClient(settings); -client.RootNodeInfo(); -counter.Should().Be(1); -client.RootNodeInfoAsync(); -counter.Should().Be(2); ----- - -An example of using `OnRequestCompleted()` for complex logging. Remember, if you would also like -to capture the request and/or response bytes, you also need to set `.DisableDirectStreaming()` -to `true` - - -[source, csharp] ----- -var list = new List(); -var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200")); -var settings = new ConnectionSettings(connectionPool, new InMemoryConnection()) - .DisableDirectStreaming() - .OnRequestCompleted(response => - { - // log out the request - if (response.RequestBodyInBytes != null) - { - list.Add( - $"{response.HttpMethod} {response.Uri} \n" + - $"{Encoding.UTF8.GetString(response.RequestBodyInBytes)}"); - } - else - { - list.Add($"{response.HttpMethod} {response.Uri}"); - } - - // log out the response - if (response.ResponseBodyInBytes != null) - { - list.Add($"Status: {response.HttpStatusCode}\n" + - $"{Encoding.UTF8.GetString(response.ResponseBodyInBytes)}\n" + - $"{new string('-', 30)}\n"); - } - else - { - list.Add($"Status: {response.HttpStatusCode}\n" + - $"{new string('-', 30)}\n"); - } - }); -list.Add( - $"{response.HttpMethod} {response.Uri} \n" + - $"{Encoding.UTF8.GetString(response.RequestBodyInBytes)}"); -list.Add($"{response.HttpMethod} {response.Uri}"); -list.Add($"Status: {response.HttpStatusCode}\n" + - $"{Encoding.UTF8.GetString(response.ResponseBodyInBytes)}\n" + - $"{new string('-', 30)}\n"); -list.Add($"Status: {response.HttpStatusCode}\n" + - $"{new string('-', 30)}\n"); -var client = new 
ElasticClient(settings); -var syncResponse = client.Search(s => s - .Scroll("2m") - .Sort(ss => ss - .Ascending(SortSpecialField.DocumentIndexOrder) - ) - ); -list.Count.Should().Be(2); -var asyncResponse = await client.SearchAsync(s => s - .Scroll("2m") - .Sort(ss => ss - .Ascending(SortSpecialField.DocumentIndexOrder) - ) - ); -list.Count.Should().Be(4); -list.ShouldAllBeEquivalentTo(new [] - { - "POST http://localhost:9200/_search?scroll=2m \n{\"sort\":[{\"_doc\":{\"order\":\"asc\"}}]}", - "Status: 200\n------------------------------\n", - "POST http://localhost:9200/_search?scroll=2m \n{\"sort\":[{\"_doc\":{\"order\":\"asc\"}}]}", - "Status: 200\n------------------------------\n" - }); ----- -## Configuring SSL -SSL must be configured outside of the client using .NET's -[ServicePointManager](http://msdn.microsoft.com/en-us/library/system.net.servicepointmanager%28v=vs.110%29.aspx) -class and setting the [ServerCertificateValidationCallback](http://msdn.microsoft.com/en-us/library/system.net.servicepointmanager.servercertificatevalidationcallback.aspx) -property. - -The bare minimum to make .NET accept self-signed SSL certs that are not in the Window's CA store would be to have the callback simply return `true`: - -[source, csharp] ----- -ServicePointManager.ServerCertificateValidationCallback += (sender, cert, chain, errors) => true; ----- - -However, this will accept all requests from the AppDomain to untrusted SSL sites, -therefore we recommend doing some minimal introspection on the passed in certificate. - - - -You can then register a factory on ConnectionSettings to create an instance of your subclass instead. -This is called once per instance of ConnectionSettings. 
- - -[source, csharp] ----- -var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200")); -var settings = new ConnectionSettings(connectionPool, new InMemoryConnection(), s => new MyJsonNetSerializer(s)); -var client = new ElasticClient(settings); -client.RootNodeInfo(); -client.RootNodeInfo(); -var serializer = ((IConnectionSettingsValues)settings).Serializer as MyJsonNetSerializer; -serializer.CallToModify.Should().BeGreaterThan(0); -serializer.SerializeToString(new Project { }); -serializer.CallToContractConverter.Should().BeGreaterThan(0); ----- diff --git a/docs/asciidoc/ClientConcepts/LowLevel/Lifetimes.doc.asciidoc b/docs/asciidoc/ClientConcepts/LowLevel/Lifetimes.doc.asciidoc deleted file mode 100644 index 2ea18a0d78a..00000000000 --- a/docs/asciidoc/ClientConcepts/LowLevel/Lifetimes.doc.asciidoc +++ /dev/null @@ -1,38 +0,0 @@ - -## Lifetimes - -If you are using an IOC container its always useful to know the best practices around the lifetime of your objects - -In general we advise folks to register their ElasticClient instances as singleton. The client is thread safe -so sharing this instance over threads is ok. - -Zooming in however the actual moving part that benefits the most of being static for most of the duration of your -application is ConnectionSettings. Caches are per ConnectionSettings. - -In some applications it could make perfect sense to have multiple singleton IElasticClient's registered with different -connectionsettings. e.g if you have 2 functionally isolated Elasticsearch clusters. 
- - - -[source, csharp] ----- -var connection = new AConnection(); -var connectionPool = new AConnectionPool(new Uri("http://localhost:9200")); -var settings = new AConnectionSettings(connectionPool, connection); -settings.IsDisposed.Should().BeFalse(); -connectionPool.IsDisposed.Should().BeFalse(); -connection.IsDisposed.Should().BeFalse(); ----- - -Disposing the ConnectionSettings will dispose the IConnectionPool and IConnection it has a hold of - - -[source, csharp] ----- -var connection = new AConnection(); -var connectionPool = new AConnectionPool(new Uri("http://localhost:9200")); -var settings = new AConnectionSettings(connectionPool, connection); -settings.IsDisposed.Should().BeTrue(); -connectionPool.IsDisposed.Should().BeTrue(); -connection.IsDisposed.Should().BeTrue(); ----- diff --git a/docs/asciidoc/ClientConcepts/LowLevel/PostData.doc.asciidoc b/docs/asciidoc/ClientConcepts/LowLevel/PostData.doc.asciidoc deleted file mode 100644 index b174112033f..00000000000 --- a/docs/asciidoc/ClientConcepts/LowLevel/PostData.doc.asciidoc +++ /dev/null @@ -1,110 +0,0 @@ -# Post data -The low level allows you to post a string, byte[] array directly. On top of this if you pass a list of strings or objects -they will be serialized in Elasticsearch's special bulk/multi format. - - -Even though the argument for postData on the low level client takes a PostData -You can rely on C# implicit conversion to abstract the notion of PostData completely. -You can implicitly convert from the following types. 
- -[source, csharp] ----- -var fromString = ImplicitlyConvertsFrom(@string); ----- -[source, csharp] ----- -var fromByteArray = ImplicitlyConvertsFrom(bytes); -var fromListOfString = ImplicitlyConvertsFrom(listOfStrings); -var fromListOfObject = ImplicitlyConvertsFrom(listOfObjects); -var fromObject = ImplicitlyConvertsFrom(@object); ----- -postData Bytes will always be set if it originated from a byte - -[source, csharp] ----- -fromByteArray.WrittenBytes.Should().BeSameAs(bytes); ----- -[source, csharp] ----- -fromString.Type.Should().Be(PostType.LiteralString); -fromByteArray.Type.Should().Be(PostType.ByteArray); -fromListOfString.Type.Should().Be(PostType.EnumerableOfString); -fromListOfObject.Type.Should().Be(PostType.EnumerableOfObject); -fromObject.Type.Should().Be(PostType.Serializable); -fromString = ImplicitlyConvertsFrom(fromString); -fromByteArray = ImplicitlyConvertsFrom(fromByteArray); -fromListOfString = ImplicitlyConvertsFrom(fromListOfString); -fromListOfObject = ImplicitlyConvertsFrom(fromListOfObject); -fromObject = ImplicitlyConvertsFrom(fromObject); -fromString.Type.Should().Be(PostType.LiteralString); -fromByteArray.Type.Should().Be(PostType.ByteArray); -fromListOfString.Type.Should().Be(PostType.EnumerableOfString); -fromListOfObject.Type.Should().Be(PostType.EnumerableOfObject); -fromObject.Type.Should().Be(PostType.Serializable); -await this.AssertOn(new ConnectionSettings()); -await this.AssertOn(new ConnectionConfiguration()); ----- -Although each implicitly types behaves slightly differently - -[source, csharp] ----- -await Post(()=>@string, writes: Utf8Bytes(@string), storesBytes: true, settings: settings); ----- -[source, csharp] ----- -await Post(()=>bytes, writes: bytes, storesBytes: true, settings: settings); ----- -When passing a list of strings we assume its a list of valid serialized json that we -join with newlinefeeds making sure there is a trailing linefeed - -[source, csharp] ----- -await Post(()=>listOfStrings, writes: 
multiStringJson, storesBytes: true, settings: settings); ----- -When passing a list of object we assume its a list of objects we need to serialize -individually to json and join with newlinefeeds aking sure there is a trailing linefeed - -[source, csharp] ----- -await Post(()=>listOfObjects, writes: multiObjectJson, storesBytes: false, settings: settings); ----- -In all other cases postdata is serialized as is. - -[source, csharp] ----- -await Post(()=>@object, writes: objectJson, storesBytes: false, settings: settings); ----- -If you want to maintain a copy of the request that went out use the following settings - -[source, csharp] ----- -settings = new ConnectionSettings().DisableDirectStreaming(); ----- -by forcing `DisableDirectStreaming` serializing happens first in a private MemoryStream -so we can get a hold of the serialized bytes - -[source, csharp] ----- -await Post(()=>listOfObjects, writes: multiObjectJson, storesBytes: true, settings: settings); ----- -this behavior can also be observed when serializing a simple object using `DisableDirectStreaming` - -[source, csharp] ----- -await Post(()=>@object, writes: objectJson, storesBytes: true, settings: settings); ----- -[source, csharp] ----- -PostAssert(postData(), writes, storesBytes, settings); -await PostAssertAsync(postData(), writes, storesBytes, settings); -postData.Write(ms, settings); -var sentBytes = ms.ToArray(); -sentBytes.Should().Equal(writes); -postData.WrittenBytes.Should().NotBeNull(); -postData.WrittenBytes.Should().BeNull(); -await postData.WriteAsync(ms, settings); -var sentBytes = ms.ToArray(); -sentBytes.Should().Equal(writes); -postData.WrittenBytes.Should().NotBeNull(); -postData.WrittenBytes.Should().BeNull(); ----- diff --git a/docs/asciidoc/CodeStandards/Descriptors.doc.asciidoc b/docs/asciidoc/CodeStandards/Descriptors.doc.asciidoc deleted file mode 100644 index 42149e5985e..00000000000 --- a/docs/asciidoc/CodeStandards/Descriptors.doc.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ - 
-Every descriptor should inherit from `DescriptorBase`, this hides object members from the fluent interface - - -[source, csharp] ----- -var notDescriptors = new[] { typeof(ClusterProcessOpenFileDescriptors).Name, "DescriptorForAttribute" }; -var descriptors = from t in typeof(DescriptorBase<,>).Assembly().Types() - where t.IsClass() - && t.Name.Contains("Descriptor") - && !notDescriptors.Contains(t.Name) - && !t.GetInterfaces().Any(i => i == typeof(IDescriptor)) - select t.FullName; -descriptors.Should().BeEmpty(); ----- - -Methods taking a func should have that func return an interface - - -[source, csharp] ----- -var descriptors = - from t in typeof(DescriptorBase<,>).Assembly().Types() - where t.IsClass() && typeof(IDescriptor).IsAssignableFrom(t) - select t; -var selectorMethods = - from d in descriptors - from m in d.GetMethods() - let parameters = m.GetParameters() - from p in parameters - let type = p.ParameterType - let isGeneric = type.IsGeneric() - where isGeneric - let isFunc = type.GetGenericTypeDefinition() == typeof(Func<,>) - where isFunc - let firstFuncArg = type.GetGenericArguments().First() - let secondFuncArg = type.GetGenericArguments().Last() - let isQueryFunc = firstFuncArg.IsGeneric() && - firstFuncArg.GetGenericTypeDefinition() == typeof(QueryContainerDescriptor<>) && - typeof(QueryContainer).IsAssignableFrom(secondFuncArg) - where !isQueryFunc - let isFluentDictionaryFunc = - firstFuncArg.IsGeneric() && - firstFuncArg.GetGenericTypeDefinition() == typeof(FluentDictionary<,>) && - secondFuncArg.IsGeneric() && - secondFuncArg.GetGenericTypeDefinition() == typeof(FluentDictionary<,>) - where !isFluentDictionaryFunc - let lastArgIsNotInterface = !secondFuncArg.IsInterface() - where lastArgIsNotInterface - select $"{m.Name} on {m.DeclaringType.Name}"; -selectorMethods.Should().BeEmpty(); ----- diff --git a/docs/asciidoc/CodeStandards/ElasticClient.doc.asciidoc b/docs/asciidoc/CodeStandards/ElasticClient.doc.asciidoc deleted file mode 100644 
index d8e7ee25432..00000000000 --- a/docs/asciidoc/CodeStandards/ElasticClient.doc.asciidoc +++ /dev/null @@ -1,54 +0,0 @@ -[source, csharp] ----- -var fluentParametersNotNamedSelector = - from m in typeof (IElasticClient).GetMethods() - from p in m.GetParameters() - where p.ParameterType.BaseType() == typeof (MulticastDelegate) - where !p.Name.Equals("selector") - select $"method '{nameof(IElasticClient)}.{m.Name}' should have parameter name of 'selector' but has a name of '{p.Name}'"; -fluentParametersNotNamedSelector.Should().BeEmpty(); -var requestParametersNotNamedRequest = - from m in typeof(IElasticClient).GetMethods() - from p in m.GetParameters() - where typeof(IRequest).IsAssignableFrom(p.ParameterType) - where !p.Name.Equals("request") - select $"method '{nameof(IElasticClient)}.{m.Name}' should have parameter name of 'request' but has a name of '{p.Name}'"; -requestParametersNotNamedRequest.Should().BeEmpty(); -var requestParameters = - (from m in typeof(IElasticClient).GetMethods() - from p in m.GetParameters() - where typeof(IRequest).IsAssignableFrom(p.ParameterType) - select p).ToList(); -requestParameter.HasDefaultValue.Should().BeFalse(); -var concreteMethodParametersDoNotMatchInterface = new List(); -var interfaceMap = typeof(ElasticClient).GetInterfaceMap(typeof(IElasticClient)); -var indexOfInterfaceMethod = Array.IndexOf(interfaceMap.InterfaceMethods, interfaceMethodInfo); -var concreteMethod = interfaceMap.TargetMethods[indexOfInterfaceMethod]; -var concreteParameters = concreteMethod.GetParameters(); -var interfaceParameters = interfaceMethodInfo.GetParameters(); -var parameterInfo = concreteParameters[i]; -var interfaceParameter = interfaceParameters[i]; -parameterInfo.Name.Should().Be(interfaceParameter.Name); -concreteMethodParametersDoNotMatchInterface.Add( - $"'{interfaceParameter.Name}' parameter on concrete implementation of '{nameof(ElasticClient)}.{interfaceMethodInfo.Name}' to {(interfaceParameter.HasDefaultValue ? 
string.Empty : "NOT")} be optional"); -concreteMethodParametersDoNotMatchInterface.Should().BeEmpty(); -var methodGroups = - from methodInfo in typeof(IElasticClient).GetMethods() - where - typeof(IResponse).IsAssignableFrom(methodInfo.ReturnType) || - (methodInfo.ReturnType.IsGeneric() - && typeof(Task<>) == methodInfo.ReturnType.GetGenericTypeDefinition() - && typeof(IResponse).IsAssignableFrom(methodInfo.ReturnType.GetGenericArguments()[0])) - let method = new MethodWithRequestParameter(methodInfo) - group method by method.Name into methodGroup - select methodGroup; -var parameters = asyncMethod.MethodInfo.GetParameters(); -var syncMethod = methodGroup.First(g => - !g.IsAsync - && g.MethodType == asyncMethod.MethodType - && g.MethodInfo.GetParameters().Length == parameters.Length - && (!asyncMethod.MethodInfo.IsGenericMethod || - g.MethodInfo.GetGenericArguments().Length == asyncMethod.MethodInfo.GetGenericArguments().Length)); -asyncMethod.Parameter.HasDefaultValue.Should().Be(syncMethod.Parameter.HasDefaultValue, - $"sync and async versions of {asyncMethod.MethodType} '{nameof(ElasticClient)}{methodGroup.Key}' should match"); ----- diff --git a/docs/asciidoc/CodeStandards/NamingConventions.doc.asciidoc b/docs/asciidoc/CodeStandards/NamingConventions.doc.asciidoc deleted file mode 100644 index 79e6d70d120..00000000000 --- a/docs/asciidoc/CodeStandards/NamingConventions.doc.asciidoc +++ /dev/null @@ -1,128 +0,0 @@ -# Naming Conventions - -NEST uses the following naming conventions (with _some_ exceptions). 
- - -## Class Names - -Abstract class names should end with a `Base` suffix - - -[source, csharp] ----- -var exceptions = new[] - { - typeof(DateMath) - }; -var abstractClasses = typeof(IRequest).Assembly().GetTypes() - .Where(t => t.IsClass() && t.IsAbstract() && !t.IsSealed() && !exceptions.Contains(t)) - .Where(t => !t.Name.Split('`')[0].EndsWith("Base")) - .Select(t => t.Name.Split('`')[0]) - .ToList(); -abstractClasses.Should().BeEmpty(); ----- - -Class names that end with `Base` suffix are abstract - - -[source, csharp] ----- -var exceptions = new[] { typeof(DateMath) }; -var baseClassesNotAbstract = typeof(IRequest).Assembly().GetTypes() - .Where(t => t.IsClass() && !exceptions.Contains(t)) - .Where(t => t.Name.Split('`')[0].EndsWith("Base")) - .Where(t => !t.IsAbstractClass()) - .Select(t => t.Name.Split('`')[0]) - .ToList(); -baseClassesNotAbstract.Should().BeEmpty(); ----- -## Requests and Responses - -Request class names should end with `Request` - - -[source, csharp] ----- -var types = typeof(IRequest).Assembly().GetTypes(); -var requests = types - .Where(t => typeof(IRequest).IsAssignableFrom(t) && !t.IsAbstract()) - .Where(t => !typeof(IDescriptor).IsAssignableFrom(t)) - .Where(t => !t.Name.Split('`')[0].EndsWith("Request")) - .Select(t => t.Name.Split('`')[0]) - .ToList(); -requests.Should().BeEmpty(); ----- - -Response class names should end with `Response` - - -[source, csharp] ----- -var types = typeof(IRequest).Assembly().GetTypes(); -var responses = types - .Where(t => typeof(IResponse).IsAssignableFrom(t) && !t.IsAbstract()) - .Where(t => !t.Name.Split('`')[0].EndsWith("Response")) - .Select(t => t.Name.Split('`')[0]) - .ToList(); -responses.Should().BeEmpty(); ----- - -Request and Response class names should be one to one in *most* cases. -e.g. `ValidateRequest` => `ValidateResponse`, and not `ValidateQueryRequest` => `ValidateResponse` -There are a few exceptions to this rule, most notably the `Cat` prefixed requests and -`Exists` requests. 
- - -[source, csharp] ----- -var exceptions = new[] - { - typeof(CatAliasesRequest), - typeof(CatAllocationRequest), - typeof(CatCountRequest), - typeof(CatFielddataRequest), - typeof(CatHealthRequest), - typeof(CatHelpRequest), - typeof(CatIndicesRequest), - typeof(CatMasterRequest), - typeof(CatNodesRequest), - typeof(CatPendingTasksRequest), - typeof(CatPluginsRequest), - typeof(CatRecoveryRequest), - typeof(CatSegmentsRequest), - typeof(CatShardsRequest), - typeof(CatThreadPoolRequest), - typeof(DocumentExistsRequest), - typeof(DocumentExistsRequest<>), - typeof(AliasExistsRequest), - typeof(IndexExistsRequest), - typeof(TypeExistsRequest), - typeof(IndexTemplateExistsRequest), - typeof(SearchExistsRequest), - typeof(SearchExistsRequest<>), - typeof(SearchTemplateRequest), - typeof(SearchTemplateRequest<>), - typeof(ScrollRequest), - typeof(SourceRequest), - typeof(SourceRequest<>), - typeof(ValidateQueryRequest<>), - typeof(GetAliasRequest), - typeof(CatNodeattrsRequest), - typeof(IndicesShardStoresRequest), - typeof(RenderSearchTemplateRequest) - }; -var types = typeof(IRequest).Assembly().GetTypes(); -var requests = new HashSet(types - .Where(t => - t.IsClass() && - !t.IsAbstract() && - typeof(IRequest).IsAssignableFrom(t) && - !typeof(IDescriptor).IsAssignableFrom(t) - && !exceptions.Contains(t)) - .Select(t => t.Name.Split('`')[0].Replace("Request", "")) - ); -var responses = types - .Where(t => t.IsClass() && !t.IsAbstract() && typeof(IResponse).IsAssignableFrom(t)) - .Select(t => t.Name.Split('`')[0].Replace("Response", "")); -requests.Except(responses).Should().BeEmpty(); ----- diff --git a/docs/asciidoc/CodeStandards/Queries.doc.asciidoc b/docs/asciidoc/CodeStandards/Queries.doc.asciidoc deleted file mode 100644 index 821fa33f5be..00000000000 --- a/docs/asciidoc/CodeStandards/Queries.doc.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -[source, csharp] ----- -var properties = from p in QueryProperties - let a = 
p.GetCustomAttributes().Concat(p.GetCustomAttributes()) - where a.Count() != 1 - select p; -properties.Should().BeEmpty(); -var staticProperties = from p in typeof(Query<>).GetMethods() - let name = p.Name.StartsWith("GeoShape") ? "GeoShape" : p.Name - select name; -var placeHolders = QueryPlaceHolderProperties.Select(p => p.Name.StartsWith("GeoShape") ? "GeoShape" : p.Name); -staticProperties.Distinct().Should().Contain(placeHolders.Distinct()); -var fluentMethods = from p in typeof(QueryContainerDescriptor<>).GetMethods() - let name = p.Name.StartsWith("GeoShape") ? "GeoShape" : p.Name - select name; -var placeHolders = QueryPlaceHolderProperties.Select(p => p.Name.StartsWith("GeoShape") ? "GeoShape" : p.Name); -fluentMethods.Distinct().Should().Contain(placeHolders.Distinct()); -var skipQueryImplementations = new[] { typeof(IFieldNameQuery), typeof(IFuzzyQuery<,>), typeof(IConditionlessQuery) }; -var queries = typeof(IQuery).Assembly().ExportedTypes - .Where(t => t.IsInterface() && typeof(IQuery).IsAssignableFrom(t)) - .Where(t => !skipQueryImplementations.Contains(t)) - .ToList(); -queries.Should().NotBeEmpty(); -var visitMethods = typeof(IQueryVisitor).GetMethods().Where(m => m.Name == "Visit"); -visitMethods.Should().NotBeEmpty(); -var missingTypes = from q in queries - let visitMethod = visitMethods.FirstOrDefault(m => m.GetParameters().First().ParameterType == q) - where visitMethod == null - select q; -missingTypes.Should().BeEmpty(); ----- diff --git a/docs/asciidoc/CommonOptions/DateMath/DateMathExpressions.doc.asciidoc b/docs/asciidoc/CommonOptions/DateMath/DateMathExpressions.doc.asciidoc deleted file mode 100644 index dd78ae09ee8..00000000000 --- a/docs/asciidoc/CommonOptions/DateMath/DateMathExpressions.doc.asciidoc +++ /dev/null @@ -1,97 +0,0 @@ -# Date Expressions -The date type supports using date math expression when using it in a query/filter -Whenever durations need to be specified, eg for a timeout parameter, the duration can be specified - 
-The expression starts with an "anchor" date, which can be either now or a date string (in the applicable format) ending with ||. -It can then follow by a math expression, supporting +, - and / (rounding). -The units supported are y (year), M (month), w (week), d (day), h (hour), m (minute), and s (second). -as a whole number representing time in milliseconds, or as a time value like `2d` for 2 days. - -Be sure to read the elasticsearch documentation {ref}/mapping-date-format.html#date-math[on this subject here] - - - -You can create simple expressions using any of the static methods on `DateMath` - -[source, csharp] ----- -Expect("now").WhenSerializing(Nest.DateMath.Now); ----- -[source, csharp] ----- -Expect("2015-05-05T00:00:00").WhenSerializing(Nest.DateMath.Anchored(new DateTime(2015,05, 05))); ----- -strings implicitly convert to date maths - -[source, csharp] ----- -Expect("now").WhenSerializing("now"); ----- -but are lenient to bad math expressions - -[source, csharp] ----- -var nonsense = "now||*asdaqwe"; ----- -[source, csharp] ----- -Expect(nonsense).WhenSerializing(nonsense) ----- -the resulting date math will assume the whole string is the anchor - -[source, csharp] ----- -.Result(dateMath => ((IDateMath)dateMath) - .Anchor.Match( - d => d.Should().NotBe(default(DateTime)), - s => s.Should().Be(nonsense) - ) - ); ----- -date's also implicitly convert to simple date math expressions - -[source, csharp] ----- -var date = new DateTime(2015, 05, 05); ----- -[source, csharp] ----- -Expect("2015-05-05T00:00:00").WhenSerializing(date) ----- -the anchor will be an actual DateTime, even after a serialization - deserialization round trip - -[source, csharp] ----- -.Result(dateMath => ((IDateMath)dateMath) - . 
Anchor.Match( - d => d.Should().Be(date), - s => s.Should().BeNull() - ) - ); ----- -Ranges can be chained on to simple expressions - -[source, csharp] ----- -Expect("now+1d").WhenSerializing(Nest.DateMath.Now.Add("1d")); ----- -plural means that you can chain multiple - -[source, csharp] ----- -Expect("now+1d-1m").WhenSerializing(Nest.DateMath.Now.Add("1d").Subtract(TimeSpan.FromMinutes(1))); ----- -a rounding value can also be chained at the end afterwhich no more ranges can be appended - -[source, csharp] ----- -Expect("now+1d-1m/d").WhenSerializing(Nest.DateMath.Now.Add("1d").Subtract(TimeSpan.FromMinutes(1)).RoundTo(Nest.TimeUnit.Day)); ----- -When anchoring date's we need to append `||` as clear separator between anchor and ranges - -[source, csharp] ----- -Expect("2015-05-05T00:00:00||+1d-1m").WhenSerializing(Nest.DateMath.Anchored(new DateTime(2015,05,05)).Add("1d").Subtract(TimeSpan.FromMinutes(1))); ----- -plural means that you can chain multiple - diff --git a/docs/asciidoc/QueryDsl/BoolDsl/BoolDsl.doc.asciidoc b/docs/asciidoc/QueryDsl/BoolDsl/BoolDsl.doc.asciidoc deleted file mode 100644 index 9dde4bb05c8..00000000000 --- a/docs/asciidoc/QueryDsl/BoolDsl/BoolDsl.doc.asciidoc +++ /dev/null @@ -1,126 +0,0 @@ -Writing boolean queries can grow rather verbose rather quickly using the query DSL e.g - -[source, csharp] ----- -var searchResults = this.Client.Search(s => s - .Query(q => q - .Bool(b => b - .Should( - bs => bs.Term(p => p.Name, "x"), - bs => bs.Term(p => p.Name, "y") - ) - ) - ) - ); ----- -now this is just a single bool with only two clauses, imagine multiple nested bools this quickly becomes an exercise in -hadouken indenting - -[[indent]] -.hadouken indenting example -image::http://i.imgur.com/BtjZedW.jpg[dead indent] - - -For this reason, NEST introduces operator overloading so complex bool queries become easier to write, the previous example will become. 
- -[source, csharp] ----- -var searchResults = this.Client.Search(s => s - .Query(q => q.Term(p => p.Name, "x") || q.Term(p => p.Name, "y")) - ); ----- -Or using the object initializer syntax - -[source, csharp] ----- -searchResults = this.Client.Search(new SearchRequest -{ - Query = new TermQuery { Field = "name", Value= "x" } - || new TermQuery { Field = Field(p=>p.Name), Value = "y" } -}); ----- -A naive implementation of operator overloading would rewrite - -`term && term && term` to - -> bool -> |___must -> |___term -> |___bool -> |___must -> |___term -> |___term - -As you can image this becomes unwieldy quite fast the more complex a query becomes NEST can spot these and -join them together to become a single bool query - -> bool -> |___must -> |___term -> |___term -> |___term - - - -The bool DSL offers also a short hand notation to mark a query as a must_not using ! - -And to mark a query as a filter using + - -Both of these can be combined with ands to a single bool query - -When combining multiple queries some or all possibly marked as must_not or filter NEST still combines to a single bool query - -> bool -> |___must -> | |___term -> | |___term -> | |___term -> | -> |___must_not -> |___term - - -[source, csharp] ----- -Assert( - q => q.Query() && q.Query() && q.Query() && !q.Query(), - Query && Query && Query && !Query, - c=> - { - c.Bool.Must.Should().HaveCount(3); - c.Bool.MustNot.Should().HaveCount(1); - }); -c.Bool.Must.Should().HaveCount(3); -c.Bool.MustNot.Should().HaveCount(1); ----- -Even more involved `term && term && term && !term && +term && +term` still only results in a single bool query: - -> bool -> |___must -> | |___term -> | |___term -> | |___term -> | -> |___must_not -> | |___term -> | -> |___filter -> |___term -> |___term - - -You can still mix and match actual bool queries with the bool dsl e.g - -`bool(must=term, term, term) && !term` - -it would still merge into a single bool query. 
- -[source, csharp] ----- -c.Bool.Should.Should().HaveCount(2); -var nestedBool = c.Bool.Should.Cast().First(b=>!string.IsNullOrEmpty(b.Bool?.Name)); -nestedBool.Bool.Should.Should().HaveCount(1); -nestedBool.Bool.Name.Should().Be(firstName); -assert(fluent.InvokeQuery(new QueryContainerDescriptor())); -assert((QueryContainer)ois); ----- diff --git a/docs/asciidoc/QueryDsl/Geo/Distance/DistanceUnits.doc.asciidoc b/docs/asciidoc/QueryDsl/Geo/Distance/DistanceUnits.doc.asciidoc deleted file mode 100644 index c049e778115..00000000000 --- a/docs/asciidoc/QueryDsl/Geo/Distance/DistanceUnits.doc.asciidoc +++ /dev/null @@ -1,98 +0,0 @@ -# Distance Units -Whenever distances need to be specified, e.g. for a geo distance query, the distance unit can be specified -as a double number representing distance in meters, as a new instance of a `Distance`, or as a string -of the form number and distance unit e.g. `"2.72km"` - -## Using Distance units in NEST -NEST uses `Distance` to strongly type distance units and there are several ways to construct one. - -### Constructor -The most straight forward way to construct a `Distance` is through its constructor - - -[source, csharp] ----- -var unitComposed = new Nest.Distance(25); -var unitComposedWithUnits = new Nest.Distance(25, DistanceUnit.Meters); ----- -When serializing Distance constructed from a string, composition of distance value and unit - -[source, csharp] ----- -Expect("25.0m") - .WhenSerializing(unitComposed) - .WhenSerializing(unitComposedWithUnits); ----- - -### Implicit conversion -Alternatively a distance unit `string` can be assigned to a `Distance`, resulting in an implicit conversion to a new `Distance` instance. 
-If no `DistanceUnit` is specified, the default distance unit is meters - - -[source, csharp] ----- -Nest.Distance distanceString = "25"; -Nest.Distance distanceStringWithUnits = "25m"; -Expect(new Nest.Distance(25)) - .WhenSerializing(distanceString) - .WhenSerializing(distanceStringWithUnits); ----- - -### Supported units -A number of distance units are supported, from millimeters to nautical miles - - -Miles - -[source, csharp] ----- -Expect("0.62mi").WhenSerializing(new Nest.Distance(0.62, DistanceUnit.Miles)); ----- -Yards - -[source, csharp] ----- -Expect("9.0yd").WhenSerializing(new Nest.Distance(9, DistanceUnit.Yards)); ----- -Feet - -[source, csharp] ----- -Expect("3.33ft").WhenSerializing(new Nest.Distance(3.33, DistanceUnit.Feet)); ----- -Inches - -[source, csharp] ----- -Expect("43.23in").WhenSerializing(new Nest.Distance(43.23, DistanceUnit.Inch)); ----- -Kilometers - -[source, csharp] ----- -Expect("0.1km").WhenSerializing(new Nest.Distance(0.1, DistanceUnit.Kilometers)); ----- -Meters - -[source, csharp] ----- -Expect("400.0m").WhenSerializing(new Nest.Distance(400, DistanceUnit.Meters)); ----- -Centimeters - -[source, csharp] ----- -Expect("123.456cm").WhenSerializing(new Nest.Distance(123.456, DistanceUnit.Centimeters)); ----- -Millimeters - -[source, csharp] ----- -Expect("2.0mm").WhenSerializing(new Nest.Distance(2, DistanceUnit.Millimeters)); ----- -Nautical Miles - -[source, csharp] ----- -Expect("45.5nmi").WhenSerializing(new Nest.Distance(45.5, DistanceUnit.NauticalMiles)); ----- diff --git a/docs/asciidoc/aggregations-usage.asciidoc b/docs/asciidoc/aggregations-usage.asciidoc new file mode 100644 index 00000000000..b7fa337ed20 --- /dev/null +++ b/docs/asciidoc/aggregations-usage.asciidoc @@ -0,0 +1,98 @@ +:includes-from-dirs: aggregations/bucket,aggregations/metric,aggregations/pipeline + +include::../../docs/asciidoc/aggregations/bucket/children/children-aggregation-mapping.asciidoc[] + 
+include::../../docs/asciidoc/aggregations/bucket/children/children-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/date-histogram/date-histogram-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/date-range/date-range-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/filter/filter-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/filters/filters-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/geo-distance/geo-distance-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/geo-hash-grid/geo-hash-grid-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/global/global-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/histogram/histogram-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/ip-range/ip-range-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/missing/missing-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/nested/nested-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/range/range-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/reverse-nested/reverse-nested-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/sampler/sampler-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/significant-terms/significant-terms-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/bucket/terms/terms-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/average/average-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/cardinality/cardinality-aggregation-usage.asciidoc[] + 
+include::../../docs/asciidoc/aggregations/metric/extended-stats/extended-stats-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/geo-bounds/geo-bounds-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/max/max-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/min/min-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/percentile-ranks/percentile-ranks-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/percentiles/percentiles-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/scripted-metric/scripted-metric-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/stats/stats-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/sum/sum-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/top-hits/top-hits-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/metric/value-count/value-count-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/average-bucket/average-bucket-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/bucket-script/bucket-script-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/bucket-selector/bucket-selector-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/cumulative-sum/cumulative-sum-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/derivative/derivative-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/extended-stats-bucket/extended-stats-bucket-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/max-bucket/max-bucket-aggregation-usage.asciidoc[] + 
+include::../../docs/asciidoc/aggregations/pipeline/min-bucket/min-bucket-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/moving-average/moving-average-ewma-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/moving-average/moving-average-holt-linear-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/moving-average/moving-average-holt-winters-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/moving-average/moving-average-linear-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/percentiles-bucket/percentiles-bucket-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/serial-differencing/serial-differencing-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/stats-bucket/stats-bucket-aggregation-usage.asciidoc[] + +include::../../docs/asciidoc/aggregations/pipeline/sum-bucket/sum-bucket-aggregation-usage.asciidoc[] + diff --git a/docs/asciidoc/aggregations.asciidoc b/docs/asciidoc/aggregations.asciidoc new file mode 100644 index 00000000000..4c1fa014590 --- /dev/null +++ b/docs/asciidoc/aggregations.asciidoc @@ -0,0 +1,110 @@ +:output-dir: aggregations + +[[aggregations]] += Aggregations + +[partintro] +-- +Aggregations are arguably one of the most powerful features of Elasticsearch and NEST +exposes all of the available Aggregation types + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +-- + 
+include::{output-dir}/writing-aggregations.asciidoc[] + +include::aggregations-usage.asciidoc[] + diff --git a/docs/asciidoc/aggregations/bucket/children/children-aggregation-mapping.asciidoc b/docs/asciidoc/aggregations/bucket/children/children-aggregation-mapping.asciidoc new file mode 100644 index 00000000000..ffba370d1cb --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/children/children-aggregation-mapping.asciidoc @@ -0,0 +1,28 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[child-aggregation-mapping]] +== Child Aggregation Mapping + +To use the {ref_current}/search-aggregations-bucket-children-aggregation.html[Children Aggregation], +you have to make sure a `_parent` mapping is in place. + +Here we create the project index with two mapped types, `Project` and `CommitActivity` and +add a `_parent` mapping to `CommitActivity`, specifying the `Project` type as the parent + +[source,csharp] +---- +var createProjectIndex = TestClient.GetClient().CreateIndex(typeof(Project), c => c + .Mappings(map => map + .Map(tm => tm.AutoMap()) + .Map(tm => tm + .Parent() <1> + ) + ) +); +---- +<1> Set the parent of `CommitActivity` to the `Project` type + diff --git a/docs/asciidoc/aggregations/bucket/children/children-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/children/children-aggregation-usage.asciidoc new file mode 100644 index 00000000000..aa45bb449c9 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/children/children-aggregation-usage.asciidoc @@ -0,0 +1,70 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[children-aggregation-usage]] +== Children Aggregation Usage + +A special single bucket aggregation that enables aggregating from buckets on parent document types to 
+buckets on child documents. + +Be sure to read the Elasticsearch documentation on {ref_current}/search-aggregations-bucket-children-aggregation.html[Children Aggregation] + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Children("name_of_child_agg", child => child + .Aggregations(childAggs => childAggs + .Average("average_per_child", avg => avg.Field(p => p.ConfidenceFactor)) + .Max("max_per_child", avg => avg.Field(p => p.ConfidenceFactor)) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new ChildrenAggregation("name_of_child_agg", typeof(CommitActivity)) + { + Aggregations = + new AverageAggregation("average_per_child", "confidenceFactor") && + new MaxAggregation("max_per_child", "confidenceFactor") + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "name_of_child_agg": { + "children": { + "type": "commits" + }, + "aggs": { + "average_per_child": { + "avg": { + "field": "confidenceFactor" + } + }, + "max_per_child": { + "max": { + "field": "confidenceFactor" + } + } + } + } + } +} +---- + diff --git a/docs/asciidoc/aggregations/bucket/date-histogram/date-histogram-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/date-histogram/date-histogram-aggregation-usage.asciidoc new file mode 100644 index 00000000000..a0803389f50 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/date-histogram/date-histogram-aggregation-usage.asciidoc @@ -0,0 +1,145 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[date-histogram-aggregation-usage]] +== Date Histogram Aggregation Usage + +A multi-bucket aggregation similar to the histogram except it can only be applied on date values. +From a functionality perspective, this histogram supports the same features as the normal histogram. 
+The main difference is that the interval can be specified by date/time expressions. + +NOTE: When specifying a `format` **and** `extended_bounds`, in order for Elasticsearch to be able to parse +the serialized `DateTime` of `extended_bounds` correctly, the `date_optional_time` format is included +as part of the `format` value. + +Be sure to read the Elasticsearch documentation on {ref_current}/search-aggregations-bucket-datehistogram-aggregation.html[Date Histogram Aggregation]. + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(aggs => aggs + .DateHistogram("projects_started_per_month", date => date + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .MinimumDocumentCount(2) + .Format("yyyy-MM-dd'T'HH:mm:ss") + .ExtendedBounds(FixedDate.AddYears(-1), FixedDate.AddYears(1)) + .Order(HistogramOrder.CountAscending) + .Missing(FixedDate) + .Aggregations(childAggs => childAggs + .Nested("project_tags", n => n + .Path(p => p.Tags) + .Aggregations(nestedAggs => nestedAggs + .Terms("tags", avg => avg.Field(p => p.Tags.First().Name)) + ) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = Field(p => p.StartedOn), + Interval = DateInterval.Month, + MinimumDocumentCount = 2, + Format = "yyyy-MM-dd'T'HH:mm:ss", + ExtendedBounds = new ExtendedBounds + { + Minimum = FixedDate.AddYears(-1), + Maximum = FixedDate.AddYears(1), + }, + Order = HistogramOrder.CountAscending, + Missing = FixedDate, + Aggregations = new NestedAggregation("project_tags") + { + Path = Field(p => p.Tags), + Aggregations = new TermsAggregation("tags") + { + Field = Field(p => p.Tags.First().Name) + } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month", + 
"min_doc_count": 2, + "format": "yyyy-MM-dd'T'HH:mm:ss||date_optional_time", + "order": { + "_count": "asc" + }, + "extended_bounds": { + "min": "2014-06-06T12:01:02.123", + "max": "2016-06-06T12:01:02.123" + }, + "missing": "2015-06-06T12:01:02.123" + }, + "aggs": { + "project_tags": { + "nested": { + "path": "tags" + }, + "aggs": { + "tags": { + "terms": { + "field": "tags.name" + } + } + } + } + } + } + } +} +---- + +=== Handling responses + +Using the `.Aggs` aggregation helper on `ISearchResponse`, we can fetch our aggregation results easily +in the correct type. <> + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); + +var dateHistogram = response.Aggs.DateHistogram("projects_started_per_month"); +dateHistogram.Should().NotBeNull(); +dateHistogram.Buckets.Should().NotBeNull(); +dateHistogram.Buckets.Count.Should().BeGreaterThan(10); + +foreach (var item in dateHistogram.Buckets) +{ + item.Date.Should().NotBe(default(DateTime)); + item.DocCount.Should().BeGreaterThan(0); + + var nested = item.Nested("project_tags"); + nested.Should().NotBeNull(); + + var nestedTerms = nested.Terms("tags"); + nestedTerms.Buckets.Count.Should().BeGreaterThan(0); +} +---- + diff --git a/docs/asciidoc/aggregations/bucket/date-range/date-range-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/date-range/date-range-aggregation-usage.asciidoc new file mode 100644 index 00000000000..db806ebc28d --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/date-range/date-range-aggregation-usage.asciidoc @@ -0,0 +1,117 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[date-range-aggregation-usage]] +== Date Range Aggregation Usage + +A range aggregation that is dedicated for date values. 
The main difference between this aggregation and the normal range aggregation is that the `from` +and `to` values can be expressed in `DateMath` expressions, and it is also possible to specify a date format by which the from and +to response fields will be returned. + +IMPORTANT: this aggregation includes the `from` value and excludes the `to` value for each range. + +Be sure to read the Elasticsearch documentation on {ref_current}/search-aggregations-bucket-daterange-aggregation.html[Date Range Aggregation] + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .DateRange("projects_date_ranges", date => date + .Field(p => p.StartedOn) + .Ranges( + r => r.From(DateMath.Anchored(FixedDate).Add("2d")).To(DateMath.Now), + r => r.To(DateMath.Now.Add(TimeSpan.FromDays(1)).Subtract("30m").RoundTo(TimeUnit.Hour)), + r => r.From(DateMath.Anchored("2012-05-05").Add(TimeSpan.FromDays(1)).Subtract("1m")) + ) + .Aggregations(childAggs => childAggs + .Terms("project_tags", avg => avg.Field(p => p.Tags)) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new DateRangeAggregation("projects_date_ranges") + { + Field = Field(p => p.StartedOn), + Ranges = new List + { + new DateRangeExpression { From = DateMath.Anchored(FixedDate).Add("2d"), To = DateMath.Now}, + new DateRangeExpression { To = DateMath.Now.Add(TimeSpan.FromDays(1)).Subtract("30m").RoundTo(TimeUnit.Hour) }, + new DateRangeExpression { From = DateMath.Anchored("2012-05-05").Add(TimeSpan.FromDays(1)).Subtract("1m") } + }, + Aggregations = + new TermsAggregation("project_tags") { Field = Field(p => p.Tags) } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "projects_date_ranges": { + "date_range": { + "field": "startedOn", + "ranges": [ + { + "to": "now", + "from": "2015-06-06T12:01:02.123||+2d" + }, + { + "to": "now+1d-30m/h" + }, + { + "from": "2012-05-05||+1d-1m" + } + ] + }, + 
      "aggs": { +        "project_tags": { +          "terms": { +            "field": "tags" +          } +        } +      } +    } +  } +} +---- + +=== Handling Responses + +Using the `.Aggs` aggregation helper we can fetch our aggregation results easily +in the correct type. <> + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); + +var dateHistogram = response.Aggs.DateRange("projects_date_ranges"); +dateHistogram.Should().NotBeNull(); +dateHistogram.Buckets.Should().NotBeNull(); +---- + +We specified three ranges so we expect to have three of them in the response + +[source,csharp] +---- +dateHistogram.Buckets.Count.Should().Be(3); + +foreach (var item in dateHistogram.Buckets) +{ + item.DocCount.Should().BeGreaterThan(0); +} +---- + diff --git a/docs/asciidoc/aggregations/bucket/filter/filter-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/filter/filter-aggregation-usage.asciidoc new file mode 100644 index 00000000000..3c7d2f20504 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/filter/filter-aggregation-usage.asciidoc @@ -0,0 +1,145 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[filter-aggregation-usage]] +== Filter Aggregation Usage + +Defines a single bucket of all the documents in the current document set context that match a specified filter. +Often this will be used to narrow down the current aggregation context to a specific set of documents. 
+ +Be sure to read the Elasticsearch documentation on {ref_current}/search-aggregations-bucket-filter-aggregation.html[Filter Aggregation] + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Filter("bethels_projects", date => date + .Filter(q => q.Term(p => p.LeadDeveloper.FirstName, FirstNameToFind)) + .Aggregations(childAggs => childAggs + .Terms("project_tags", avg => avg.Field(p => p.CuratedTags.First().Name.Suffix("keyword"))) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new FilterAggregation("bethels_projects") + { + Filter = new TermQuery {Field = Field(p => p.LeadDeveloper.FirstName), Value = FirstNameToFind}, + Aggregations = + new TermsAggregation("project_tags") { Field = Field(p => p.CuratedTags.First().Name.Suffix("keyword")) } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "bethels_projects": { + "filter": { + "term": { + "leadDeveloper.firstName": { + "value": "pierce" + } + } + }, + "aggs": { + "project_tags": { + "terms": { + "field": "curatedTags.name.keyword" + } + } + } + } + } +} +---- + +=== Handling Responses + +Using the `.Aggs` aggregation helper we can fetch our aggregation results easily +in the correct type. <> + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); + +var filterAgg = response.Aggs.Filter("bethels_projects"); +filterAgg.Should().NotBeNull(); +filterAgg.DocCount.Should().BeGreaterThan(0); +var tags = filterAgg.Terms("project_tags"); +tags.Should().NotBeNull(); +tags.Buckets.Should().NotBeEmpty(); +---- + +[[empty-filter]] +[float] +== Empty Filter + +When the collection of filters is empty or all are conditionless, NEST will serialize them +to an empty object. 
 + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Filter("empty_filter", date => date + .Filter(f => f + .Bool(b => b + .Filter(new QueryContainer[0]) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new FilterAggregation("empty_filter") + { + Filter = new BoolQuery + { + Filter = new List() + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "empty_filter": { + "filter": {} + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +response.Aggs.Filter("empty_filter").DocCount.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/bucket/filters/filters-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/filters/filters-aggregation-usage.asciidoc new file mode 100644 index 00000000000..e7ed24c3089 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/filters/filters-aggregation-usage.asciidoc @@ -0,0 +1,351 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[filters-aggregation-usage]] +== Filters Aggregation Usage + +Defines a multi-bucket aggregation where each bucket is associated with a filter. +Each bucket will collect all documents that match its associated filter. For documents +that do not match any filter, these will be collected in the _other bucket_. 
+ +Be sure to read the Elasticsearch documentation {ref_current}/search-aggregations-bucket-filters-aggregation.html[Filters Aggregation] + +[[named-filters]] +[float] +== Named filters + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Filters("projects_by_state", agg => agg + .OtherBucket() + .OtherBucketKey("other_states_of_being") + .NamedFilters(filters => filters + .Filter("belly_up", f => f.Term(p => p.State, StateOfBeing.BellyUp)) + .Filter("stable", f => f.Term(p => p.State, StateOfBeing.Stable)) + .Filter("very_active", f => f.Term(p => p.State, StateOfBeing.VeryActive)) + ) + .Aggregations(childAggs => childAggs + .Terms("project_tags", avg => avg.Field(p => p.CuratedTags.First().Name.Suffix("keyword"))) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new FiltersAggregation("projects_by_state") + { + OtherBucket = true, + OtherBucketKey = "other_states_of_being", + Filters = new NamedFiltersContainer + { + { "belly_up", Query.Term(p=>p.State, StateOfBeing.BellyUp) }, + { "stable", Query.Term(p=>p.State, StateOfBeing.Stable) }, + { "very_active", Query.Term(p=>p.State, StateOfBeing.VeryActive) } + }, + Aggregations = + new TermsAggregation("project_tags") { Field = Field(p => p.CuratedTags.First().Name.Suffix("keyword")) } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "projects_by_state": { + "filters": { + "other_bucket": true, + "other_bucket_key": "other_states_of_being", + "filters": { + "belly_up": { + "term": { + "state": { + "value": "BellyUp" + } + } + }, + "stable": { + "term": { + "state": { + "value": "Stable" + } + } + }, + "very_active": { + "term": { + "state": { + "value": "VeryActive" + } + } + } + } + }, + "aggs": { + "project_tags": { + "terms": { + "field": "curatedTags.name.keyword" + } + } + } + } + } +} +---- + +=== Handling Responses + +Using the `.Agg` aggregation helper we can fetch 
our aggregation results easily +in the correct type. <> + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); + +var filterAgg = response.Aggs.Filters("projects_by_state"); +filterAgg.Should().NotBeNull(); +var namedResult = filterAgg.NamedBucket("belly_up"); +namedResult.Should().NotBeNull(); +namedResult.DocCount.Should().BeGreaterThan(0); +namedResult = filterAgg.NamedBucket("stable"); +namedResult.Should().NotBeNull(); +namedResult.DocCount.Should().BeGreaterThan(0); +namedResult = filterAgg.NamedBucket("very_active"); +namedResult.Should().NotBeNull(); +namedResult.DocCount.Should().BeGreaterThan(0); +namedResult = filterAgg.NamedBucket("other_states_of_being"); +namedResult.Should().NotBeNull(); +namedResult.DocCount.Should().Be(0); +---- + +[[anonymous-filters]] +[float] +== Anonymous filters + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Filters("projects_by_state", agg => agg + .OtherBucket() + .AnonymousFilters( + f => f.Term(p => p.State, StateOfBeing.BellyUp), + f => f.Term(p => p.State, StateOfBeing.Stable), + f => f.Term(p => p.State, StateOfBeing.VeryActive) + ) + .Aggregations(childAggs => childAggs + .Terms("project_tags", avg => avg.Field(p => p.CuratedTags.First().Name.Suffix("keyword"))) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new FiltersAggregation("projects_by_state") + { + OtherBucket = true, + Filters = new List + { + Query.Term(p=>p.State, StateOfBeing.BellyUp) , + Query.Term(p=>p.State, StateOfBeing.Stable) , + Query.Term(p=>p.State, StateOfBeing.VeryActive) + }, + Aggregations = + new TermsAggregation("project_tags") { Field = Field(p => p.CuratedTags.First().Name.Suffix("keyword")) } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "projects_by_state": { + "filters": { + "other_bucket": true, + "filters": [ + { + "term": { + "state": { + "value": "BellyUp" + } + } + }, + { + 
"term": { + "state": { + "value": "Stable" + } + } + }, + { + "term": { + "state": { + "value": "VeryActive" + } + } + } + ] + }, + "aggs": { + "project_tags": { + "terms": { + "field": "curatedTags.name.keyword" + } + } + } + } + } +} +---- + +=== Handling Responses + +Using the `.Agg` aggregation helper we can fetch our aggregation results easily +in the correct type. <> + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); + +var filterAgg = response.Aggs.Filters("projects_by_state"); +filterAgg.Should().NotBeNull(); +var results = filterAgg.AnonymousBuckets(); +results.Count.Should().Be(4); + +foreach (var singleBucket in results.Take(3)) +{ + singleBucket.DocCount.Should().BeGreaterThan(0); +} + +results.Last().DocCount.Should().Be(0); <1> +---- +<1> The last bucket is the _other bucket_ + +[[empty-filters]] +[float] +== Empty Filters + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Filters("empty_filters", agg => agg + .AnonymousFilters() + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new FiltersAggregation("empty_filters") + { + Filters = new List() + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "empty_filters": { + "filters": { + "filters": [] + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +response.Aggs.Filters("empty_filters").Buckets.Should().BeEmpty(); +---- + +[[conditionless-filters]] +[float] +== Conditionless Filters + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Filters("conditionless_filters", agg => agg + .AnonymousFilters( + q => new QueryContainer() + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new FiltersAggregation("conditionless_filters") + { + Filters = new List + { + new QueryContainer() + } + } +} +---- + 
+[source,javascript] +.Example json output +---- +{ + "aggs": { + "conditionless_filters": { + "filters": { + "filters": [] + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +response.Aggs.Filters("conditionless_filters").Buckets.Should().BeEmpty(); +---- + diff --git a/docs/asciidoc/aggregations/bucket/geo-distance/geo-distance-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/geo-distance/geo-distance-aggregation-usage.asciidoc new file mode 100644 index 00000000000..c02be78bf43 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/geo-distance/geo-distance-aggregation-usage.asciidoc @@ -0,0 +1,89 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-distance-aggregation-usage]] +== Geo Distance Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .GeoDistance("rings_around_amsterdam", g => g + .Field(p => p.Location) + .Origin(52.376, 4.894) + .Ranges( + r => r.To(100), + r => r.From(100).To(300), + r => r.From(300) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new GeoDistanceAggregation("rings_around_amsterdam") + { + Field = Field((Project p) => p.Location), + Origin = "52.376, 4.894", + Ranges = new List + { + new Nest.Range { To = 100 }, + new Nest.Range { From = 100, To = 300 }, + new Nest.Range { From = 300 } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "rings_around_amsterdam": { + "geo_distance": { + "field": "location", + "origin": { + "lat": 52.376, + "lon": 4.894 + }, + "ranges": [ + { + "to": 100.0 + }, + { + "from": 100.0, + "to": 300.0 + }, + { + "from": 300.0 + } + ] + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var 
ringsAroundAmsterdam = response.Aggs.GeoDistance("rings_around_amsterdam"); +ringsAroundAmsterdam.Should().NotBeNull(); +ringsAroundAmsterdam.Buckets.Where(r => r.Key == "*-100.0").FirstOrDefault().Should().NotBeNull(); +ringsAroundAmsterdam.Buckets.Where(r => r.Key == "100.0-300.0").FirstOrDefault().Should().NotBeNull(); +ringsAroundAmsterdam.Buckets.Where(r => r.Key == "300.0-*").FirstOrDefault().Should().NotBeNull(); +---- + diff --git a/docs/asciidoc/aggregations/bucket/geo-hash-grid/geo-hash-grid-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/geo-hash-grid/geo-hash-grid-aggregation-usage.asciidoc new file mode 100644 index 00000000000..9eb3bdafbf6 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/geo-hash-grid/geo-hash-grid-aggregation-usage.asciidoc @@ -0,0 +1,66 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-hash-grid-aggregation-usage]] +== Geo Hash Grid Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .GeoHash("my_geohash_grid", g => g + .Field(p => p.Location) + .GeoHashPrecision(GeoHashPrecision.Precision3) + .Size(1000) + .ShardSize(100) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new GeoHashGridAggregation("my_geohash_grid") + { + Field = Field(p => p.Location), + Precision = GeoHashPrecision.Precision3, + Size = 1000, + ShardSize = 100 + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "my_geohash_grid": { + "geohash_grid": { + "field": "location", + "precision": 3, + "size": 1000, + "shard_size": 100 + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var myGeoHashGrid = response.Aggs.GeoHash("my_geohash_grid"); +myGeoHashGrid.Should().NotBeNull(); +---- + diff --git 
a/docs/asciidoc/aggregations/bucket/global/global-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/global/global-aggregation-usage.asciidoc new file mode 100644 index 00000000000..6bfd96b6823 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/global/global-aggregation-usage.asciidoc @@ -0,0 +1,71 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[global-aggregation-usage]] +== Global Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Global("all_projects", g => g + .Aggregations(aa => aa + .Terms("names", t => t + .Field(p => p.Name) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new GlobalAggregation("all_projects") + { + Aggregations = new TermsAggregation("names") + { + Field = Field(p => p.Name) + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "all_projects": { + "global": {}, + "aggs": { + "names": { + "terms": { + "field": "name" + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var allProjects = response.Aggs.Global("all_projects"); +allProjects.Should().NotBeNull(); +var names = allProjects.Terms("names"); +names.Should().NotBeNull(); +---- + diff --git a/docs/asciidoc/aggregations/bucket/histogram/histogram-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/histogram/histogram-aggregation-usage.asciidoc new file mode 100644 index 00000000000..5b7998033a7 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/histogram/histogram-aggregation-usage.asciidoc @@ -0,0 +1,71 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + 
+[[histogram-aggregation-usage]] +== Histogram Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Histogram("commits", h => h + .Field(p => p.NumberOfCommits) + .Interval(100) + .Missing(0) + .Order(HistogramOrder.KeyDescending) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new HistogramAggregation("commits") + { + Field = Field(p => p.NumberOfCommits), + Interval = 100, + Missing = 0, + Order = HistogramOrder.KeyDescending + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "commits": { + "histogram": { + "field": "numberOfCommits", + "interval": 100.0, + "missing": 0.0, + "order": { + "_key": "desc" + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var commits = response.Aggs.Histogram("commits"); +commits.Should().NotBeNull(); + +foreach (var item in commits.Buckets) + item.DocCount.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/bucket/ip-range/ip-range-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/ip-range/ip-range-aggregation-usage.asciidoc new file mode 100644 index 00000000000..9865419766d --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/ip-range/ip-range-aggregation-usage.asciidoc @@ -0,0 +1,79 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[ip-range-aggregation-usage]] +== Ip Range Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .IpRange("ip_ranges", ip => ip + .Field(p => p.LeadDeveloper.IPAddress) + .Ranges( + r => r.To("10.0.0.5"), + r => r.From("10.0.0.5") + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new 
IpRangeAggregation("ip_ranges") + { + Field = Field((Project p) => p.LeadDeveloper.IPAddress), + Ranges = new List + { + new Nest.IpRange { To = "10.0.0.5" }, + new Nest.IpRange { From = "10.0.0.5" } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "ip_ranges": { + "ip_range": { + "field": "leadDeveloper.iPAddress", + "ranges": [ + { + "to": "10.0.0.5" + }, + { + "from": "10.0.0.5" + } + ] + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var ipRanges = response.Aggs.IpRange("ip_ranges"); +ipRanges.Should().NotBeNull(); +ipRanges.Buckets.Should().NotBeNull(); +ipRanges.Buckets.Count.Should().BeGreaterThan(0); + +foreach (var range in ipRanges.Buckets) + range.DocCount.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/bucket/missing/missing-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/missing/missing-aggregation-usage.asciidoc new file mode 100644 index 00000000000..ace26cb5528 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/missing/missing-aggregation-usage.asciidoc @@ -0,0 +1,57 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[missing-aggregation-usage]] +== Missing Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Missing("projects_without_a_description", m => m + .Field(p => p.Description.Suffix("keyword")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new MissingAggregation("projects_without_a_description") + { + Field = Field(p => p.Description.Suffix("keyword")) + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "projects_without_a_description": { + "missing": { + "field": "description.keyword" + } + } + } +} +---- + +=== Handling 
Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsWithoutDesc = response.Aggs.Missing("projects_without_a_description"); +projectsWithoutDesc.Should().NotBeNull(); +---- + diff --git a/docs/asciidoc/aggregations/bucket/nested/nested-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/nested/nested-aggregation-usage.asciidoc new file mode 100644 index 00000000000..7da85b5c737 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/nested/nested-aggregation-usage.asciidoc @@ -0,0 +1,81 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[nested-aggregation-usage]] +== Nested Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Nested("tags", n => n + .Path(p => p.Tags) + .Aggregations(aa => aa + .Terms("tag_names", t => t + .Field(p => p.Tags.Suffix("name")) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new NestedAggregation("tags") + { + Path = "tags", + Aggregations = new TermsAggregation("tag_names") + { + Field = "tags.name" + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "tags": { + "nested": { + "path": "tags" + }, + "aggs": { + "tag_names": { + "terms": { + "field": "tags.name" + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var tags = response.Aggs.Nested("tags"); +tags.Should().NotBeNull(); +var tagNames = tags.Terms("tag_names"); +tagNames.Should().NotBeNull(); + +foreach(var item in tagNames.Buckets) +{ + item.Key.Should().NotBeNullOrEmpty(); + item.DocCount.Should().BeGreaterThan(0); +} +---- + diff --git a/docs/asciidoc/aggregations/bucket/range/range-aggregation-usage.asciidoc 
b/docs/asciidoc/aggregations/bucket/range/range-aggregation-usage.asciidoc new file mode 100644 index 00000000000..54bbe77c892 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/range/range-aggregation-usage.asciidoc @@ -0,0 +1,84 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[range-aggregation-usage]] +== Range Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Range("commit_ranges", ra => ra + .Field(p => p.NumberOfCommits) + .Ranges( + r => r.To(100), + r => r.From(100).To(500), + r => r.From(500) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new RangeAggregation("commit_ranges") + { + Field = Field(p => p.NumberOfCommits), + Ranges = new List + { + { new Nest.Range { To = 100 } }, + { new Nest.Range { From = 100, To = 500 } }, + { new Nest.Range { From = 500 } } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "commit_ranges": { + "range": { + "field": "numberOfCommits", + "ranges": [ + { + "to": 100.0 + }, + { + "from": 100.0, + "to": 500.0 + }, + { + "from": 500.0 + } + ] + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var commitRanges = response.Aggs.Range("commit_ranges"); +commitRanges.Should().NotBeNull(); +commitRanges.Buckets.Count.Should().Be(3); +commitRanges.Buckets.Where(r => r.Key == "*-100.0").FirstOrDefault().Should().NotBeNull(); +commitRanges.Buckets.Where(r => r.Key == "100.0-500.0").FirstOrDefault().Should().NotBeNull(); +commitRanges.Buckets.Where(r => r.Key == "500.0-*").FirstOrDefault().Should().NotBeNull(); +---- + diff --git a/docs/asciidoc/aggregations/bucket/reverse-nested/reverse-nested-aggregation-usage.asciidoc 
b/docs/asciidoc/aggregations/bucket/reverse-nested/reverse-nested-aggregation-usage.asciidoc new file mode 100644 index 00000000000..96ad072a3e9 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/reverse-nested/reverse-nested-aggregation-usage.asciidoc @@ -0,0 +1,118 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[reverse-nested-aggregation-usage]] +== Reverse Nested Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Nested("tags", n => n + .Path(p => p.Tags) + .Aggregations(aa => aa + .Terms("tag_names", t => t + .Field(p => p.Tags.Suffix("name")) + .Aggregations(aaa => aaa + .ReverseNested("tags_to_project", r => r + .Aggregations(aaaa => aaaa + .Terms("top_projects_per_tag", tt => tt + .Field(p => p.Name) + ) + ) + ) + ) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new NestedAggregation("tags") + { + Path = "tags", + Aggregations = new TermsAggregation("tag_names") + { + Field = "tags.name", + Aggregations = new ReverseNestedAggregation("tags_to_project") + { + Aggregations = new TermsAggregation("top_projects_per_tag") + { + Field = Field(p => p.Name) + } + } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "tags": { + "nested": { + "path": "tags" + }, + "aggs": { + "tag_names": { + "terms": { + "field": "tags.name" + }, + "aggs": { + "tags_to_project": { + "reverse_nested": {}, + "aggs": { + "top_projects_per_tag": { + "terms": { + "field": "name" + } + } + } + } + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var tags = response.Aggs.Nested("tags"); +tags.Should().NotBeNull(); +var tagNames = tags.Terms("tag_names"); +tagNames.Should().NotBeNull(); + +foreach(var tagName in 
tagNames.Buckets) +{ + tagName.Key.Should().NotBeNullOrEmpty(); + tagName.DocCount.Should().BeGreaterThan(0); + var tagsToProjects = tagName.ReverseNested("tags_to_project"); + tagsToProjects.Should().NotBeNull(); + var topProjectsPerTag = tagsToProjects.Terms("top_projects_per_tag"); + topProjectsPerTag.Should().NotBeNull(); + foreach(var topProject in topProjectsPerTag.Buckets) + { + topProject.Key.Should().NotBeNullOrEmpty(); + topProject.DocCount.Should().BeGreaterThan(0); + } +} +---- + diff --git a/docs/asciidoc/aggregations/bucket/sampler/sampler-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/sampler/sampler-aggregation-usage.asciidoc new file mode 100644 index 00000000000..2f0696050c4 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/sampler/sampler-aggregation-usage.asciidoc @@ -0,0 +1,75 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[sampler-aggregation-usage]] +== Sampler Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Sampler("sample", sm => sm + .ShardSize(200) + .Aggregations(aa => aa + .SignificantTerms("significant_names", st => st + .Field(p => p.Name) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new SamplerAggregation("sample") + { + ShardSize = 200, + Aggregations = new SignificantTermsAggregation("significant_names") + { + Field = "name" + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "sample": { + "sampler": { + "shard_size": 200 + }, + "aggs": { + "significant_names": { + "significant_terms": { + "field": "name" + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var sample = response.Aggs.Sampler("sample"); +sample.Should().NotBeNull(); +var 
sigTags = sample.SignificantTerms("significant_names"); +sigTags.Should().NotBeNull(); +---- + diff --git a/docs/asciidoc/aggregations/bucket/significant-terms/significant-terms-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/bucket/significant-terms/significant-terms-aggregation-usage.asciidoc new file mode 100644 index 00000000000..33b6cc3225a --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/significant-terms/significant-terms-aggregation-usage.asciidoc @@ -0,0 +1,74 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[significant-terms-aggregation-usage]] +== Significant Terms Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .SignificantTerms("significant_names", st => st + .Field(p => p.Name) + .MinimumDocumentCount(10) + .MutualInformation(mi => mi + .BackgroundIsSuperSet() + .IncludeNegatives() + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new SignificantTermsAggregation("significant_names") + { + Field = Field(p => p.Name), + MinimumDocumentCount = 10, + MutualInformation = new MutualInformationHeuristic + { + BackgroundIsSuperSet = true, + IncludeNegatives = true + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "significant_names": { + "significant_terms": { + "field": "name", + "min_doc_count": 10, + "mutual_information": { + "background_is_superset": true, + "include_negatives": true + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var sigNames = response.Aggs.SignificantTerms("significant_names"); +sigNames.Should().NotBeNull(); +sigNames.DocCount.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/bucket/terms/terms-aggregation-usage.asciidoc 
b/docs/asciidoc/aggregations/bucket/terms/terms-aggregation-usage.asciidoc new file mode 100644 index 00000000000..240cb4e0e92 --- /dev/null +++ b/docs/asciidoc/aggregations/bucket/terms/terms-aggregation-usage.asciidoc @@ -0,0 +1,113 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[terms-aggregation-usage]] +== Terms Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Terms("states", st => st + .Field(p => p.State) + .MinimumDocumentCount(2) + .Size(5) + .ShardSize(100) + .ExecutionHint(TermsAggregationExecutionHint.Map) + .Missing("n/a") + .Script("'State of Being: '+_value") + .Order(TermsOrder.TermAscending) + .Order(TermsOrder.CountDescending) + .Meta(m => m + .Add("foo", "bar") + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new TermsAggregation("states") + { + Field = Field(p => p.State), + MinimumDocumentCount = 2, + Size = 5, + ShardSize = 100, + ExecutionHint = TermsAggregationExecutionHint.Map, + Missing = "n/a", + Script = new InlineScript("'State of Being: '+_value"), + Order = new List + { + TermsOrder.TermAscending, + TermsOrder.CountDescending + }, + Meta = new Dictionary + { + { "foo", "bar" } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "states": { + "meta": { + "foo": "bar" + }, + "terms": { + "field": "state", + "min_doc_count": 2, + "size": 5, + "shard_size": 100, + "execution_hint": "map", + "missing": "n/a", + "script": { + "inline": "'State of Being: '+_value" + }, + "order": [ + { + "_term": "asc" + }, + { + "_count": "desc" + } + ] + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var states = response.Aggs.Terms("states"); +states.Should().NotBeNull(); 
+states.DocCountErrorUpperBound.Should().HaveValue(); +states.SumOtherDocCount.Should().HaveValue(); + +foreach (var item in states.Buckets) +{ + item.Key.Should().NotBeNullOrEmpty(); + item.DocCount.Should().BeGreaterOrEqualTo(1); +} + +states.Meta.Should().NotBeNull().And.HaveCount(1); +states.Meta["foo"].Should().Be("bar"); +---- + diff --git a/docs/asciidoc/aggregations/metric/average/average-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/average/average-aggregation-usage.asciidoc new file mode 100644 index 00000000000..734b5e8cba7 --- /dev/null +++ b/docs/asciidoc/aggregations/metric/average/average-aggregation-usage.asciidoc @@ -0,0 +1,77 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[average-aggregation-usage]] +== Average Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Average("average_commits", avg => avg + .Meta(m => m + .Add("foo", "bar") + ) + .Field(p => p.NumberOfCommits) + .Missing(10) + .Script("_value * 1.2") + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new AverageAggregation("average_commits", Field(p => p.NumberOfCommits)) + { + Meta = new Dictionary + { + { "foo", "bar" } + }, + Missing = 10, + Script = new InlineScript("_value * 1.2") + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "average_commits": { + "meta": { + "foo": "bar" + }, + "avg": { + "field": "numberOfCommits", + "missing": 10.0, + "script": { + "inline": "_value * 1.2" + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var commitsAvg = response.Aggs.Average("average_commits"); +commitsAvg.Should().NotBeNull(); +commitsAvg.Value.Should().BeGreaterThan(0); +commitsAvg.Meta.Should().NotBeNull().And.HaveCount(1); 
+commitsAvg.Meta["foo"].Should().Be("bar"); +---- + diff --git a/docs/asciidoc/aggregations/metric/cardinality/cardinality-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/cardinality/cardinality-aggregation-usage.asciidoc new file mode 100644 index 00000000000..fb0810351cc --- /dev/null +++ b/docs/asciidoc/aggregations/metric/cardinality/cardinality-aggregation-usage.asciidoc @@ -0,0 +1,60 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[cardinality-aggregation-usage]] +== Cardinality Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Cardinality("state_count", c => c + .Field(p => p.State) + .PrecisionThreshold(100) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new CardinalityAggregation("state_count", Field(p => p.State)) + { + PrecisionThreshold = 100 + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "state_count": { + "cardinality": { + "field": "state", + "precision_threshold": 100 + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectCount = response.Aggs.Cardinality("state_count"); +projectCount.Should().NotBeNull(); +projectCount.Value.Should().Be(3); +---- + diff --git a/docs/asciidoc/aggregations/metric/extended-stats/extended-stats-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/extended-stats/extended-stats-aggregation-usage.asciidoc new file mode 100644 index 00000000000..fa6c851ad10 --- /dev/null +++ b/docs/asciidoc/aggregations/metric/extended-stats/extended-stats-aggregation-usage.asciidoc @@ -0,0 +1,64 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: 
https://www.nuget.org/packages + +[[extended-stats-aggregation-usage]] +== Extended Stats Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .ExtendedStats("commit_stats", es => es + .Field(p => p.NumberOfCommits) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new ExtendedStatsAggregation("commit_stats", Field(p => p.NumberOfCommits)) +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "commit_stats": { + "extended_stats": { + "field": "numberOfCommits" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var commitStats = response.Aggs.ExtendedStats("commit_stats"); +commitStats.Should().NotBeNull(); +commitStats.Average.Should().BeGreaterThan(0); +commitStats.Max.Should().BeGreaterThan(0); +commitStats.Min.Should().BeGreaterThan(0); +commitStats.Count.Should().BeGreaterThan(0); +commitStats.Sum.Should().BeGreaterThan(0); +commitStats.SumOfSquares.Should().BeGreaterThan(0); +commitStats.StdDeviation.Should().BeGreaterThan(0); +commitStats.StdDeviationBounds.Should().NotBeNull(); +commitStats.StdDeviationBounds.Upper.Should().BeGreaterThan(0); +commitStats.StdDeviationBounds.Lower.Should().NotBe(0); +---- + diff --git a/docs/asciidoc/aggregations/metric/geo-bounds/geo-bounds-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/geo-bounds/geo-bounds-aggregation-usage.asciidoc new file mode 100644 index 00000000000..dcc46cf5830 --- /dev/null +++ b/docs/asciidoc/aggregations/metric/geo-bounds/geo-bounds-aggregation-usage.asciidoc @@ -0,0 +1,72 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-bounds-aggregation-usage]] +== Geo Bounds Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s 
+.Aggregations(a => a + .GeoBounds("viewport", gb => gb + .Field(p => p.Location) + .WrapLongitude(true) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new GeoBoundsAggregation("viewport", Field(p => p.Location)) + { + WrapLongitude = true + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "viewport": { + "geo_bounds": { + "field": "location", + "wrap_longitude": true + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var viewport = response.Aggs.GeoBounds("viewport"); +viewport.Should().NotBeNull(); +viewport.Bounds.Should().NotBeNull(); +var bottomRight = viewport.Bounds.BottomRight; +bottomRight.Should().NotBeNull(); +bottomRight.Lat.Should().HaveValue(); +GeoLocation.IsValidLatitude(bottomRight.Lat.Value).Should().BeTrue(); +bottomRight.Lon.Should().HaveValue(); +GeoLocation.IsValidLongitude(bottomRight.Lon.Value).Should().BeTrue(); +var topLeft = viewport.Bounds.TopLeft; +topLeft.Should().NotBeNull(); +topLeft.Lat.Should().HaveValue(); +GeoLocation.IsValidLatitude(topLeft.Lat.Value).Should().BeTrue(); +topLeft.Lon.Should().HaveValue(); +GeoLocation.IsValidLongitude(topLeft.Lon.Value).Should().BeTrue(); +---- + diff --git a/docs/asciidoc/aggregations/metric/max/max-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/max/max-aggregation-usage.asciidoc new file mode 100644 index 00000000000..d015404711c --- /dev/null +++ b/docs/asciidoc/aggregations/metric/max/max-aggregation-usage.asciidoc @@ -0,0 +1,55 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[max-aggregation-usage]] +== Max Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Max("max_commits", m => m + .Field(p => p.NumberOfCommits) + ) +) +---- + +=== 
Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new MaxAggregation("max_commits", Field(p => p.NumberOfCommits)) +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "max_commits": { + "max": { + "field": "numberOfCommits" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var max = response.Aggs.Max("max_commits"); +max.Should().NotBeNull(); +max.Value.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/metric/min/min-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/min/min-aggregation-usage.asciidoc new file mode 100644 index 00000000000..4212daf58f2 --- /dev/null +++ b/docs/asciidoc/aggregations/metric/min/min-aggregation-usage.asciidoc @@ -0,0 +1,55 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[min-aggregation-usage]] +== Min Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Min("min_commits", m => m + .Field(p => p.NumberOfCommits) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new MinAggregation("min_commits", Field(p => p.NumberOfCommits)) +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "min_commits": { + "min": { + "field": "numberOfCommits" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var min = response.Aggs.Min("min_commits"); +min.Should().NotBeNull(); +min.Value.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/metric/percentile-ranks/percentile-ranks-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/percentile-ranks/percentile-ranks-aggregation-usage.asciidoc new file mode 100644 index 
00000000000..3b65c0cdefe --- /dev/null +++ b/docs/asciidoc/aggregations/metric/percentile-ranks/percentile-ranks-aggregation-usage.asciidoc @@ -0,0 +1,86 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[percentile-ranks-aggregation-usage]] +== Percentile Ranks Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .PercentileRanks("commits_outlier", pr => pr + .Field(p => p.NumberOfCommits) + .Values(15, 30) + .Method(m => m + .TDigest(td => td + .Compression(200) + ) + ) + .Script("doc['numberOfCommits'].value * 1.2") + .Missing(0) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new PercentileRanksAggregation("commits_outlier", Field(p => p.NumberOfCommits)) + { + Values = new List { 15, 30 }, + Method = new TDigestMethod + { + Compression = 200 + }, + Script = (InlineScript)"doc['numberOfCommits'].value * 1.2", + Missing = 0 + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "commits_outlier": { + "percentile_ranks": { + "field": "numberOfCommits", + "values": [ + 15.0, + 30.0 + ], + "tdigest": { + "compression": 200.0 + }, + "script": { + "inline": "doc['numberOfCommits'].value * 1.2" + }, + "missing": 0.0 + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var commitsOutlier = response.Aggs.PercentileRanks("commits_outlier"); +commitsOutlier.Should().NotBeNull(); +commitsOutlier.Items.Should().NotBeNullOrEmpty(); + +foreach (var item in commitsOutlier.Items) + item.Should().NotBeNull(); +---- + diff --git a/docs/asciidoc/aggregations/metric/percentiles/percentiles-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/percentiles/percentiles-aggregation-usage.asciidoc new file mode 100644 index 00000000000..07c7753ddbb 
--- /dev/null +++ b/docs/asciidoc/aggregations/metric/percentiles/percentiles-aggregation-usage.asciidoc @@ -0,0 +1,87 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[percentiles-aggregation-usage]] +== Percentiles Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Percentiles("commits_outlier", pr => pr + .Field(p => p.NumberOfCommits) + .Percents(95, 99, 99.9) + .Method(m => m + .HDRHistogram(hdr => hdr + .NumberOfSignificantValueDigits(3) + ) + ) + .Script("doc['numberOfCommits'].value * 1.2") + .Missing(0) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new PercentilesAggregation("commits_outlier", Field(p => p.NumberOfCommits)) + { + Percents = new[] { 95, 99, 99.9 }, + Method = new HDRHistogramMethod + { + NumberOfSignificantValueDigits = 3 + }, + Script = new InlineScript("doc['numberOfCommits'].value * 1.2"), + Missing = 0 + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "commits_outlier": { + "percentiles": { + "field": "numberOfCommits", + "percents": [ + 95.0, + 99.0, + 99.9 + ], + "hdr": { + "number_of_significant_value_digits": 3 + }, + "script": { + "inline": "doc['numberOfCommits'].value * 1.2" + }, + "missing": 0.0 + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var commitsOutlier = response.Aggs.Percentiles("commits_outlier"); +commitsOutlier.Should().NotBeNull(); +commitsOutlier.Items.Should().NotBeNullOrEmpty(); + +foreach (var item in commitsOutlier.Items) + item.Value.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/metric/scripted-metric/scripted-metric-aggregation-usage.asciidoc 
b/docs/asciidoc/aggregations/metric/scripted-metric/scripted-metric-aggregation-usage.asciidoc new file mode 100644 index 00000000000..b84ed678d92 --- /dev/null +++ b/docs/asciidoc/aggregations/metric/scripted-metric/scripted-metric-aggregation-usage.asciidoc @@ -0,0 +1,75 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[scripted-metric-aggregation-usage]] +== Scripted Metric Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .ScriptedMetric("sum_the_hard_way", sm => sm + .InitScript("_agg['commits'] = []") + .MapScript("if (doc['state'].value == \"Stable\") { _agg.commits.add(doc['numberOfCommits']) }") + .CombineScript("sum = 0; for (c in _agg.commits) { sum += c }; return sum") + .ReduceScript("sum = 0; for (a in _aggs) { sum += a }; return sum") + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new ScriptedMetricAggregation("sum_the_hard_way") + { + InitScript = new InlineScript("_agg['commits'] = []"), + MapScript = new InlineScript("if (doc['state'].value == \"Stable\") { _agg.commits.add(doc['numberOfCommits']) }"), + CombineScript = new InlineScript("sum = 0; for (c in _agg.commits) { sum += c }; return sum"), + ReduceScript = new InlineScript("sum = 0; for (a in _aggs) { sum += a }; return sum") + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "sum_the_hard_way": { + "scripted_metric": { + "init_script": { + "inline": "_agg['commits'] = []" + }, + "map_script": { + "inline": "if (doc['state'].value == \"Stable\") { _agg.commits.add(doc['numberOfCommits']) }" + }, + "combine_script": { + "inline": "sum = 0; for (c in _agg.commits) { sum += c }; return sum" + }, + "reduce_script": { + "inline": "sum = 0; for (a in _aggs) { sum += a }; return sum" + } + } + } + } +} +---- + 
+=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var sumTheHardWay = response.Aggs.ScriptedMetric("sum_the_hard_way"); +sumTheHardWay.Should().NotBeNull(); +sumTheHardWay.Value().Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/metric/stats/stats-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/stats/stats-aggregation-usage.asciidoc new file mode 100644 index 00000000000..a74cb818864 --- /dev/null +++ b/docs/asciidoc/aggregations/metric/stats/stats-aggregation-usage.asciidoc @@ -0,0 +1,59 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[stats-aggregation-usage]] +== Stats Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Stats("commit_stats", st => st + .Field(p => p.NumberOfCommits) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new StatsAggregation("commit_stats", Field(p => p.NumberOfCommits)) +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "commit_stats": { + "stats": { + "field": "numberOfCommits" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var commitStats = response.Aggs.Stats("commit_stats"); +commitStats.Should().NotBeNull(); +commitStats.Average.Should().BeGreaterThan(0); +commitStats.Max.Should().BeGreaterThan(0); +commitStats.Min.Should().BeGreaterThan(0); +commitStats.Count.Should().BeGreaterThan(0); +commitStats.Sum.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/metric/sum/sum-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/sum/sum-aggregation-usage.asciidoc new file mode 100644 index 00000000000..2fbce89b3bb --- /dev/null +++ 
b/docs/asciidoc/aggregations/metric/sum/sum-aggregation-usage.asciidoc @@ -0,0 +1,55 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[sum-aggregation-usage]] +== Sum Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Sum("commits_sum", sm => sm + .Field(p => p.NumberOfCommits) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new SumAggregation("commits_sum", Field(p => p.NumberOfCommits)) +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "commits_sum": { + "sum": { + "field": "numberOfCommits" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var commitsSum = response.Aggs.Sum("commits_sum"); +commitsSum.Should().NotBeNull(); +commitsSum.Value.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/metric/top-hits/top-hits-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/top-hits/top-hits-aggregation-usage.asciidoc new file mode 100644 index 00000000000..e18646f1660 --- /dev/null +++ b/docs/asciidoc/aggregations/metric/top-hits/top-hits-aggregation-usage.asciidoc @@ -0,0 +1,176 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[top-hits-aggregation-usage]] +== Top Hits Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .Terms("states", t => t + .Field(p => p.State) + .Aggregations(aa => aa + .TopHits("top_state_hits", th => th + .Sort(srt => srt + .Field(p => p.StartedOn) + .Order(SortOrder.Descending) + ) + .Source(src => src + .Include(fs => fs + .Field(p => p.Name) + .Field(p => p.StartedOn) + ) + ) + .Size(1) 
+ .Version() + .Explain() + .FielddataFields(fd => fd + .Field(p => p.State) + .Field(p => p.NumberOfCommits) + ) + .Highlight(h => h + .Fields( + hf => hf.Field(p => p.Tags), + hf => hf.Field(p => p.Description) + ) + ) + .ScriptFields(sfs => sfs + .ScriptField("commit_factor", sf => sf + .Inline("doc['numberOfCommits'].value * 2") + ) + ) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new TermsAggregation("states") + { + Field = Field(p => p.State), + Aggregations = new TopHitsAggregation("top_state_hits") + { + Sort = new List + { + { + new SortField { Field = Field(p => p.StartedOn), Order = SortOrder.Descending } + } + }, + Source = new SourceFilter + { + Include = new [] { "name", "startedOn" } + }, + Size = 1, + Version = true, + Explain = true, + FielddataFields = new [] { "state", "numberOfCommits" }, + Highlight = new Highlight + { + Fields = new Dictionary + { + { Field(p => p.Tags), new HighlightField() }, + { Field(p => p.Description), new HighlightField() } + } + }, + ScriptFields = new ScriptFields + { + { "commit_factor", new ScriptField { Script = new InlineScript("doc['numberOfCommits'].value * 2") } } + } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "states": { + "terms": { + "field": "state" + }, + "aggs": { + "top_state_hits": { + "top_hits": { + "sort": [ + { + "startedOn": { + "order": "desc" + } + } + ], + "_source": { + "include": [ + "name", + "startedOn" + ] + }, + "size": 1, + "version": true, + "explain": true, + "fielddata_fields": [ + "state", + "numberOfCommits" + ], + "highlight": { + "fields": { + "tags": {}, + "description": {} + } + }, + "script_fields": { + "commit_factor": { + "script": { + "inline": "doc['numberOfCommits'].value * 2" + } + } + } + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var states = response.Aggs.Terms("states"); 
+states.Should().NotBeNull(); +states.Buckets.Should().NotBeNullOrEmpty(); + +foreach(var state in states.Buckets) +{ + state.Key.Should().NotBeNullOrEmpty(); + state.DocCount.Should().BeGreaterThan(0); + var topStateHits = state.TopHits("top_state_hits"); + topStateHits.Should().NotBeNull(); + topStateHits.Total.Should().BeGreaterThan(0); + var hits = topStateHits.Hits(); + hits.Should().NotBeNullOrEmpty(); + hits.All(h => h.Explanation != null).Should().BeTrue(); + hits.All(h => h.Version.HasValue).Should().BeTrue(); + //hits.All(h => h.Highlights.Count() > 0).Should().BeTrue(); + hits.All(h => h.Fields.ValuesOf("state").Any()).Should().BeTrue(); + hits.All(h => h.Fields.ValuesOf("numberOfCommits").Any()).Should().BeTrue(); + hits.All(h => h.Fields.ValuesOf("commit_factor").Any()).Should().BeTrue(); + topStateHits.Documents().Should().NotBeEmpty(); +} +---- + diff --git a/docs/asciidoc/aggregations/metric/value-count/value-count-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/metric/value-count/value-count-aggregation-usage.asciidoc new file mode 100644 index 00000000000..543960f3202 --- /dev/null +++ b/docs/asciidoc/aggregations/metric/value-count/value-count-aggregation-usage.asciidoc @@ -0,0 +1,55 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[value-count-aggregation-usage]] +== Value Count Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Aggregations(a => a + .ValueCount("commit_count", c => c + .Field(p => p.NumberOfCommits) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new ValueCountAggregation("commit_count", Field(p => p.NumberOfCommits)) +} +---- + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "commit_count": { + "value_count": { + "field": "numberOfCommits" + } + } + } +} +---- + +=== 
Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var commitCount = response.Aggs.ValueCount("commit_count"); +commitCount.Should().NotBeNull(); +commitCount.Value.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/pipeline/average-bucket/average-bucket-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/average-bucket/average-bucket-aggregation-usage.asciidoc new file mode 100644 index 00000000000..5627246d18e --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/average-bucket/average-bucket-aggregation-usage.asciidoc @@ -0,0 +1,95 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[average-bucket-aggregation-usage]] +== Average Bucket Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + ) + ) + .AverageBucket("average_commits_per_month", aaa => aaa + .BucketsPath("projects_started_per_month>commits") + .GapPolicy(GapPolicy.InsertZeros) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = new SumAggregation("commits", "numberOfCommits") + } + && new AverageBucketAggregation("average_commits_per_month", "projects_started_per_month>commits") + { + GapPolicy = GapPolicy.InsertZeros + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + 
"commits": { + "sum": { + "field": "numberOfCommits" + } + } + } + }, + "average_commits_per_month": { + "avg_bucket": { + "buckets_path": "projects_started_per_month>commits", + "gap_policy": "insert_zeros" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); +var averageCommits = response.Aggs.AverageBucket("average_commits_per_month"); +averageCommits.Should().NotBeNull(); +averageCommits.Value.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/pipeline/bucket-script/bucket-script-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/bucket-script/bucket-script-aggregation-usage.asciidoc new file mode 100644 index 00000000000..e930912614a --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/bucket-script/bucket-script-aggregation-usage.asciidoc @@ -0,0 +1,147 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[bucket-script-aggregation-usage]] +== Bucket Script Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .Filter("stable_state", f => f + .Filter(ff => ff + .Term(p => p.State, "Stable") + ) + .Aggregations(aaa => aaa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + ) + ) + .BucketScript("stable_percentage", bs => bs + .BucketsPath(bp => bp + .Add("totalCommits", "commits") + .Add("stableCommits", "stable_state>commits") + ) + 
.Script("stableCommits / totalCommits * 100") + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new FilterAggregation("stable_state") + { + Filter = new TermQuery + { + Field = "state", + Value = "Stable" + }, + Aggregations = new SumAggregation("commits", "numberOfCommits") + } && + new BucketScriptAggregation("stable_percentage", new MultiBucketsPath + { + { "totalCommits", "commits" }, + { "stableCommits", "stable_state>commits" } + }) + { + Script = (InlineScript)"stableCommits / totalCommits * 100" + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "stable_state": { + "filter": { + "term": { + "state": { + "value": "Stable" + } + } + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + } + } + }, + "stable_percentage": { + "bucket_script": { + "buckets_path": { + "totalCommits": "commits", + "stableCommits": "stable_state>commits" + }, + "script": { + "inline": "stableCommits / totalCommits * 100" + } + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); + +foreach(var item in projectsPerMonth.Buckets) +{ + var stablePercentage = item.BucketScript("stable_percentage"); + stablePercentage.Should().NotBeNull(); + 
stablePercentage.Value.Should().HaveValue(); +} +---- + diff --git a/docs/asciidoc/aggregations/pipeline/bucket-selector/bucket-selector-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/bucket-selector/bucket-selector-aggregation-usage.asciidoc new file mode 100644 index 00000000000..cbdf8078e58 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/bucket-selector/bucket-selector-aggregation-usage.asciidoc @@ -0,0 +1,109 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[bucket-selector-aggregation-usage]] +== Bucket Selector Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .BucketSelector("commits_bucket_filter", bs => bs + .BucketsPath(bp => bp + .Add("totalCommits", "commits") + ) + .Script("totalCommits >= 500") + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new BucketSelectorAggregation("commits_bucket_filter", new MultiBucketsPath + { + { "totalCommits", "commits" }, + }) + { + Script = (InlineScript)"totalCommits >= 500" + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "commits_bucket_filter": { + "bucket_selector": { + "buckets_path": { + 
"totalCommits": "commits" + }, + "script": { + "inline": "totalCommits >= 500" + } + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); + +foreach(var item in projectsPerMonth.Buckets) +{ + var commits = item.Sum("commits"); + commits.Should().NotBeNull(); + commits.Value.Should().BeGreaterOrEqualTo(500); +} +---- + diff --git a/docs/asciidoc/aggregations/pipeline/cumulative-sum/cumulative-sum-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/cumulative-sum/cumulative-sum-aggregation-usage.asciidoc new file mode 100644 index 00000000000..cc1d1d69a67 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/cumulative-sum/cumulative-sum-aggregation-usage.asciidoc @@ -0,0 +1,95 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[cumulative-sum-aggregation-usage]] +== Cumulative Sum Aggregation Usage + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); + +foreach (var item in projectsPerMonth.Buckets) +{ + var commitsDerivative = item.Derivative("cumulative_commits"); + commitsDerivative.Should().NotBeNull(); + commitsDerivative.Value.Should().NotBe(null); +} +---- + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + 
.Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .CumulativeSum("cumulative_commits", d => d + .BucketsPath("commits") + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new CumulativeSumAggregation("cumulative_commits", "commits") + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "cumulative_commits": { + "cumulative_sum": { + "buckets_path": "commits" + } + } + } + } + } +} +---- + diff --git a/docs/asciidoc/aggregations/pipeline/derivative/derivative-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/derivative/derivative-aggregation-usage.asciidoc new file mode 100644 index 00000000000..3fcaffd2b90 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/derivative/derivative-aggregation-usage.asciidoc @@ -0,0 +1,95 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[derivative-aggregation-usage]] +== Derivative Aggregation Usage + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); + +foreach (var item in projectsPerMonth.Buckets.Skip(1)) +{ + var commitsDerivative = item.Derivative("commits_derivative"); + 
commitsDerivative.Should().NotBeNull(); + commitsDerivative.Value.Should().NotBe(null); +} +---- + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .Derivative("commits_derivative", d => d + .BucketsPath("commits") + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new DerivativeAggregation("commits_derivative", "commits") + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "commits_derivative": { + "derivative": { + "buckets_path": "commits" + } + } + } + } + } +} +---- + diff --git a/docs/asciidoc/aggregations/pipeline/extended-stats-bucket/extended-stats-bucket-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/extended-stats-bucket/extended-stats-bucket-aggregation-usage.asciidoc new file mode 100644 index 00000000000..d87167ba73f --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/extended-stats-bucket/extended-stats-bucket-aggregation-usage.asciidoc @@ -0,0 +1,104 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[extended-stats-bucket-aggregation-usage]] +== Extended Stats Bucket Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) 
+.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + ) + ) + .ExtendedStatsBucket("extended_stats_commits_per_month", aaa => aaa + .BucketsPath("projects_started_per_month>commits") + .Sigma(2.0) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = new SumAggregation("commits", "numberOfCommits") + } + && new ExtendedStatsBucketAggregation("extended_stats_commits_per_month", "projects_started_per_month>commits") + { + Sigma = 2.0 + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + } + } + }, + "extended_stats_commits_per_month": { + "extended_stats_bucket": { + "buckets_path": "projects_started_per_month>commits", + "sigma": 2.0 + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); +var commitsStats = response.Aggs.ExtendedStatsBucket("extended_stats_commits_per_month"); +commitsStats.Should().NotBeNull(); +commitsStats.Average.Should().BeGreaterThan(0); +commitsStats.Max.Should().BeGreaterThan(0); +commitsStats.Min.Should().BeGreaterThan(0); +commitsStats.Count.Should().BeGreaterThan(0); +commitsStats.Sum.Should().BeGreaterThan(0); +commitsStats.SumOfSquares.Should().BeGreaterThan(0); 
+commitsStats.StdDeviation.Should().BeGreaterThan(0); +commitsStats.StdDeviationBounds.Should().NotBeNull(); +commitsStats.StdDeviationBounds.Upper.Should().BeGreaterThan(0); +commitsStats.StdDeviationBounds.Lower.Should().NotBe(0); +---- + diff --git a/docs/asciidoc/aggregations/pipeline/max-bucket/max-bucket-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/max-bucket/max-bucket-aggregation-usage.asciidoc new file mode 100644 index 00000000000..1e1110c3d59 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/max-bucket/max-bucket-aggregation-usage.asciidoc @@ -0,0 +1,95 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[max-bucket-aggregation-usage]] +== Max Bucket Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + ) + ) + .MaxBucket("max_commits_per_month", aaa => aaa + .BucketsPath("projects_started_per_month>commits") + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = new SumAggregation("commits", "numberOfCommits") + } + && new MaxBucketAggregation("max_commits_per_month", "projects_started_per_month>commits") +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + } + } + }, + "max_commits_per_month": { + "max_bucket": { + 
"buckets_path": "projects_started_per_month>commits" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); +var maxCommits = response.Aggs.MaxBucket("max_commits_per_month"); +maxCommits.Should().NotBeNull(); +maxCommits.Value.Should().BeGreaterThan(0); +maxCommits.Keys.Should().NotBeNull(); +maxCommits.Keys.Count.Should().BeGreaterOrEqualTo(1); + +foreach (var key in maxCommits.Keys) + key.Should().NotBeNull(); +---- + diff --git a/docs/asciidoc/aggregations/pipeline/min-bucket/min-bucket-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/min-bucket/min-bucket-aggregation-usage.asciidoc new file mode 100644 index 00000000000..62396ebdc34 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/min-bucket/min-bucket-aggregation-usage.asciidoc @@ -0,0 +1,95 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[min-bucket-aggregation-usage]] +== Min Bucket Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + ) + ) + .MinBucket("min_commits_per_month", aaa => aaa + .BucketsPath("projects_started_per_month>commits") + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = new 
SumAggregation("commits", "numberOfCommits") + } + && new MinBucketAggregation("min_commits_per_month", "projects_started_per_month>commits") +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + } + } + }, + "min_commits_per_month": { + "min_bucket": { + "buckets_path": "projects_started_per_month>commits" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); +var minCommits = response.Aggs.MinBucket("min_commits_per_month"); +minCommits.Should().NotBeNull(); +minCommits.Value.Should().BeGreaterThan(0); +minCommits.Keys.Should().NotBeNull(); +minCommits.Keys.Count.Should().BeGreaterOrEqualTo(1); + +foreach (var key in minCommits.Keys) + key.Should().NotBeNullOrEmpty(); +---- + diff --git a/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-ewma-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-ewma-aggregation-usage.asciidoc new file mode 100644 index 00000000000..ea0193398d0 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-ewma-aggregation-usage.asciidoc @@ -0,0 +1,110 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[moving-average-ewma-aggregation-usage]] +== Moving Average Ewma Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + 
.Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .MovingAverage("commits_moving_avg", mv => mv + .BucketsPath("commits") + .Model(m => m + .Ewma(e => e + .Alpha(0.3f) + ) + ) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new MovingAverageAggregation("commits_moving_avg", "commits") + { + Model = new EwmaModel + { + Alpha = 0.3f, + } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "commits_moving_avg": { + "moving_avg": { + "buckets_path": "commits", + "model": "ewma", + "settings": { + "alpha": 0.3 + } + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); + +foreach(var item in projectsPerMonth.Buckets.Skip(1)) +{ + var movingAvg = item.MovingAverage("commits_moving_avg"); + movingAvg.Should().NotBeNull(); + movingAvg.Value.Should().BeGreaterThan(0); +} +---- + diff --git a/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-holt-linear-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-holt-linear-aggregation-usage.asciidoc new file mode 100644 index 00000000000..2dc8978353f --- /dev/null +++ 
b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-holt-linear-aggregation-usage.asciidoc @@ -0,0 +1,113 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[moving-average-holt-linear-aggregation-usage]] +== Moving Average Holt Linear Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .MovingAverage("commits_moving_avg", mv => mv + .BucketsPath("commits") + .Model(m => m + .HoltLinear(hl => hl + .Alpha(0.5f) + .Beta(0.5f) + ) + ) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new MovingAverageAggregation("commits_moving_avg", "commits") + { + Model = new HoltLinearModel + { + Alpha = 0.5f, + Beta = 0.5f, + } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "commits_moving_avg": { + "moving_avg": { + "buckets_path": "commits", + "model": "holt", + "settings": { + "alpha": 0.5, + "beta": 0.5 + } + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); 
+projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); + +foreach(var item in projectsPerMonth.Buckets.Skip(1)) +{ + var movingAvg = item.MovingAverage("commits_moving_avg"); + movingAvg.Should().NotBeNull(); + movingAvg.Value.Should().BeGreaterThan(0); +} +---- + diff --git a/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-holt-winters-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-holt-winters-aggregation-usage.asciidoc new file mode 100644 index 00000000000..21eeddf84d0 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-holt-winters-aggregation-usage.asciidoc @@ -0,0 +1,117 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[moving-average-holt-winters-aggregation-usage]] +== Moving Average Holt Winters Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .MovingAverage("commits_moving_avg", mv => mv + .BucketsPath("commits") + .Window(60) + .Model(m => m + .HoltWinters(hw => hw + .Type(HoltWintersType.Multiplicative) + .Alpha(0.5f) + .Beta(0.5f) + .Gamma(0.5f) + .Period(30) + .Pad(false) + ) + ) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new MovingAverageAggregation("commits_moving_avg", "commits") + { + Window = 60, + Model = new 
HoltWintersModel + { + Type = HoltWintersType.Multiplicative, + Alpha = 0.5f, + Beta = 0.5f, + Gamma = 0.5f, + Period = 30, + Pad = false + } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "commits_moving_avg": { + "moving_avg": { + "buckets_path": "commits", + "window": 60, + "model": "holt_winters", + "settings": { + "type": "mult", + "alpha": 0.5, + "beta": 0.5, + "gamma": 0.5, + "period": 30, + "pad": false + } + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +---- + diff --git a/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-linear-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-linear-aggregation-usage.asciidoc new file mode 100644 index 00000000000..dc62a581a90 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-linear-aggregation-usage.asciidoc @@ -0,0 +1,106 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[moving-average-linear-aggregation-usage]] +== Moving Average Linear Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .MovingAverage("commits_moving_avg", mv => mv + .BucketsPath("commits") + .GapPolicy(GapPolicy.InsertZeros) + .Model(m => m + .Linear() + ) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ 
+ Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new MovingAverageAggregation("commits_moving_avg", "commits") + { + GapPolicy = GapPolicy.InsertZeros, + Model = new LinearModel() + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "commits_moving_avg": { + "moving_avg": { + "buckets_path": "commits", + "gap_policy": "insert_zeros", + "model": "linear", + "settings": {} + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); + +foreach(var item in projectsPerMonth.Buckets.Skip(1)) +{ + var movingAvg = item.MovingAverage("commits_moving_avg"); + movingAvg.Should().NotBeNull(); + movingAvg.Value.Should().BeGreaterThan(0); +} +---- + diff --git a/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc new file mode 100644 index 00000000000..d980cc341bd --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc @@ -0,0 +1,109 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[moving-average-simple-aggregation-usage]] +== Moving Average Simple Aggregation 
Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .MovingAverage("commits_moving_avg", mv => mv + .BucketsPath("commits") + .Window(30) + .Predict(10) + .Model(m => m + .Simple() + ) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new MovingAverageAggregation("commits_moving_avg", "commits") + { + Window = 30, + Predict = 10, + Model = new SimpleModel() + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "commits_moving_avg": { + "moving_avg": { + "buckets_path": "commits", + "model": "simple", + "window": 30, + "predict": 10, + "settings": {} + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); + +foreach(var item in projectsPerMonth.Buckets.Skip(1)) +{ + var movingAvg = item.Sum("commits_moving_avg"); + movingAvg.Should().NotBeNull(); + movingAvg.Value.Should().BeGreaterThan(0); +} +---- + diff --git a/docs/asciidoc/aggregations/pipeline/percentiles-bucket/percentiles-bucket-aggregation-usage.asciidoc 
b/docs/asciidoc/aggregations/pipeline/percentiles-bucket/percentiles-bucket-aggregation-usage.asciidoc new file mode 100644 index 00000000000..1d609c0a736 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/percentiles-bucket/percentiles-bucket-aggregation-usage.asciidoc @@ -0,0 +1,102 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[percentiles-bucket-aggregation-usage]] +== Percentiles Bucket Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + ) + ) + .PercentilesBucket("commits_outlier", aaa => aaa + .BucketsPath("projects_started_per_month>commits") + .Percents(95, 99, 99.9) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = new SumAggregation("commits", "numberOfCommits") + } + && new PercentilesBucketAggregation("commits_outlier", "projects_started_per_month>commits") + { + Percents = new[] { 95, 99, 99.9 } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + } + } + }, + "commits_outlier": { + "percentiles_bucket": { + "buckets_path": "projects_started_per_month>commits", + "percents": [ + 95.0, + 99.0, + 99.9 + ] + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var 
projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); +var commitsOutlier = response.Aggs.PercentilesBucket("commits_outlier"); +commitsOutlier.Should().NotBeNull(); +commitsOutlier.Items.Should().NotBeNullOrEmpty(); + +foreach (var item in commitsOutlier.Items) + item.Value.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/pipeline/serial-differencing/serial-differencing-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/serial-differencing/serial-differencing-aggregation-usage.asciidoc new file mode 100644 index 00000000000..06912dd140b --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/serial-differencing/serial-differencing-aggregation-usage.asciidoc @@ -0,0 +1,100 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[serial-differencing-aggregation-usage]] +== Serial Differencing Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + .SerialDifferencing("thirtieth_difference", d => d + .BucketsPath("commits") + .Lag(30) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = + new SumAggregation("commits", "numberOfCommits") && + new SerialDifferencingAggregation("thirtieth_difference", "commits") + { + Lag = 30 + } + } +} +---- + +[source,javascript] 
+.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + }, + "thirtieth_difference": { + "serial_diff": { + "buckets_path": "commits", + "lag": 30 + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); + +foreach (var item in projectsPerMonth.Buckets) +{ + var commits = item.Sum("commits"); + commits.Should().NotBeNull(); + commits.Value.Should().NotBe(null); +} +---- + diff --git a/docs/asciidoc/aggregations/pipeline/stats-bucket/stats-bucket-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/stats-bucket/stats-bucket-aggregation-usage.asciidoc new file mode 100644 index 00000000000..6bc98b27430 --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/stats-bucket/stats-bucket-aggregation-usage.asciidoc @@ -0,0 +1,94 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[stats-bucket-aggregation-usage]] +== Stats Bucket Aggregation Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + ) + ) + .StatsBucket("stats_commits_per_month", aaa => aaa + .BucketsPath("projects_started_per_month>commits") + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + 
Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = new SumAggregation("commits", "numberOfCommits") + } + && new StatsBucketAggregation("stats_commits_per_month", "projects_started_per_month>commits") +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + } + } + }, + "stats_commits_per_month": { + "stats_bucket": { + "buckets_path": "projects_started_per_month>commits" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); +var commitsStats = response.Aggs.StatsBucket("stats_commits_per_month"); +commitsStats.Should().NotBeNull(); +commitsStats.Average.Should().BeGreaterThan(0); +commitsStats.Max.Should().BeGreaterThan(0); +commitsStats.Min.Should().BeGreaterThan(0); +commitsStats.Count.Should().BeGreaterThan(0); +commitsStats.Sum.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/pipeline/sum-bucket/sum-bucket-aggregation-usage.asciidoc b/docs/asciidoc/aggregations/pipeline/sum-bucket/sum-bucket-aggregation-usage.asciidoc new file mode 100644 index 00000000000..be50897a8cf --- /dev/null +++ b/docs/asciidoc/aggregations/pipeline/sum-bucket/sum-bucket-aggregation-usage.asciidoc @@ -0,0 +1,90 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[sum-bucket-aggregation-usage]] +== Sum Bucket Aggregation Usage + +=== Fluent DSL 
Example + +[source,csharp] +---- +s => s +.Size(0) +.Aggregations(a => a + .DateHistogram("projects_started_per_month", dh => dh + .Field(p => p.StartedOn) + .Interval(DateInterval.Month) + .Aggregations(aa => aa + .Sum("commits", sm => sm + .Field(p => p.NumberOfCommits) + ) + ) + ) + .SumBucket("sum_of_commits", aaa => aaa + .BucketsPath("projects_started_per_month>commits") + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + Size = 0, + Aggregations = new DateHistogramAggregation("projects_started_per_month") + { + Field = "startedOn", + Interval = DateInterval.Month, + Aggregations = new SumAggregation("commits", "numberOfCommits") + } + && new SumBucketAggregation("sum_of_commits", "projects_started_per_month>commits") +} +---- + +[source,javascript] +.Example json output +---- +{ + "size": 0, + "aggs": { + "projects_started_per_month": { + "date_histogram": { + "field": "startedOn", + "interval": "month" + }, + "aggs": { + "commits": { + "sum": { + "field": "numberOfCommits" + } + } + } + }, + "sum_of_commits": { + "sum_bucket": { + "buckets_path": "projects_started_per_month>commits" + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); +var projectsPerMonth = response.Aggs.DateHistogram("projects_started_per_month"); +projectsPerMonth.Should().NotBeNull(); +projectsPerMonth.Buckets.Should().NotBeNull(); +projectsPerMonth.Buckets.Count.Should().BeGreaterThan(0); +var commitsSum = response.Aggs.SumBucket("sum_of_commits"); +commitsSum.Should().NotBeNull(); +commitsSum.Value.Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/aggregations/writing-aggregations.asciidoc b/docs/asciidoc/aggregations/writing-aggregations.asciidoc new file mode 100644 index 00000000000..d7620f515dc --- /dev/null +++ b/docs/asciidoc/aggregations/writing-aggregations.asciidoc @@ -0,0 +1,180 @@ +:ref_current: 
https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[writing-aggregations]] +== Writing Aggregations + +NEST allows you to write your aggregations using + +* a strict fluent DSL + +* a verbatim object initializer syntax that maps verbatim to the Elasticsearch API + +* a more terse object initializer aggregation DSL + +Three different ways, yikes that's a lot to take in! Let's go over them one by one and explain when you might +want to use each. + +This is the json output for each example + +[source,javascript] +.Example json output +---- +{ + "aggs": { + "name_of_child_agg": { + "children": { + "type": "commits" + }, + "aggs": { + "average_per_child": { + "avg": { + "field": "confidenceFactor" + } + }, + "max_per_child": { + "max": { + "field": "confidenceFactor" + } + } + } + } + } +} +---- + +=== Fluent DSL + +The fluent lambda syntax is the most terse way to write aggregations. +It benefits from types that are carried over to sub aggregations + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Children("name_of_child_agg", child => child + .Aggregations(childAggs => childAggs + .Average("average_per_child", avg => avg.Field(p => p.ConfidenceFactor)) + .Max("max_per_child", avg => avg.Field(p => p.ConfidenceFactor)) + ) + ) +) +---- + +=== Object Initializer syntax + +The object initializer syntax (OIS) is a one-to-one mapping with how aggregations +have to be represented in the Elasticsearch API. While it has the benefit of being a one-to-one +mapping, being dictionary based in C# means it can grow exponentially in complexity rather quickly. 
+ +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new ChildrenAggregation("name_of_child_agg", typeof(CommitActivity)) + { + Aggregations = + new AverageAggregation("average_per_child", "confidenceFactor") + && new MaxAggregation("max_per_child", "confidenceFactor") + } +} +---- + +=== Terse Object Initializer DSL + +For this reason the OIS syntax can be shortened dramatically by using the `*Agg` family of types. +These allow you to forego introducing intermediary Dictionaries to represent the aggregation DSL. +It also allows you to combine multiple aggregations using the logical AND (`&&`) operator. + +Compare the following example with the previous vanilla OIS syntax + +[source,csharp] +---- +new SearchRequest +{ + Aggregations = new ChildrenAggregation("name_of_child_agg", typeof(CommitActivity)) + { + Aggregations = + new AverageAggregation("average_per_child", Field(p => p.ConfidenceFactor)) + && new MaxAggregation("max_per_child", Field(p => p.ConfidenceFactor)) + } +} +---- + +=== Aggregating over a collection of aggregations + +An advanced scenario may involve an existing collection of aggregation functions that should be set as aggregations +on the request. Using LINQ's `.Aggregate()` method, each function can be applied to the aggregation descriptor +(`childAggs` below) in turn, returning the descriptor after each function application. 
+ +[source,csharp] +---- +var aggregations = new List<Func<AggregationContainerDescriptor<CommitActivity>, IAggregationContainer>> <1> +{ + a => a.Average("average_per_child", avg => avg.Field(p => p.ConfidenceFactor)), + a => a.Max("max_per_child", avg => avg.Field(p => p.ConfidenceFactor)) +}; +return s => s + .Aggregations(aggs => aggs + .Children("name_of_child_agg", child => child + .Aggregations(childAggs => + aggregations.Aggregate(childAggs, (acc, agg) => { agg(acc); return acc; }) <2> + ) + ) + ); +---- +<1> a list of aggregation functions to apply + +<2> Using LINQ's `Aggregate()` function to accumulate/apply all of the aggregation functions + +[[aggs-vs-aggregations]] +=== Aggs vs. Aggregations + +The response exposes both `.Aggregations` and `.Aggs` properties for handling aggregations. Why two properties you ask? +Well, the former is a dictionary of aggregation names to `IAggregate` types, a common interface for +aggregation responses (termed __Aggregates__ in NEST), and the latter is a convenience helper to get the right type +of aggregation response out of the dictionary based on a key name. + +This is better illustrated with an example + +Let's imagine we make the following request. + +[source,csharp] +---- +s => s +.Aggregations(aggs => aggs + .Children("name_of_child_agg", child => child + .Aggregations(childAggs => childAggs + .Average("average_per_child", avg => avg.Field(p => p.ConfidenceFactor)) + .Max("max_per_child", avg => avg.Field(p => p.ConfidenceFactor)) + ) + ) +) +---- + +=== Aggs usage + +Now, using `.Aggs`, we can easily get the `Children` aggregation response out and from that, +the `Average` and `Max` sub aggregations. 
+ +[source,csharp] +---- +response.IsValid.Should().BeTrue(); + +var childAggregation = response.Aggs.Children("name_of_child_agg"); + +var averagePerChild = childAggregation.Average("average_per_child"); + +averagePerChild.Should().NotBeNull(); <1> + +var maxPerChild = childAggregation.Max("max_per_child"); + +maxPerChild.Should().NotBeNull(); <2> +---- +<1> Do something with the average per child. Here we just assert it's not null + +<2> Do something with the max per child. Here we just assert it's not null + diff --git a/docs/asciidoc/analysis/analyzers/analyzer-usage.asciidoc b/docs/asciidoc/analysis/analyzers/analyzer-usage.asciidoc new file mode 100644 index 00000000000..223b62e0370 --- /dev/null +++ b/docs/asciidoc/analysis/analyzers/analyzer-usage.asciidoc @@ -0,0 +1,77 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[analyzer-usage]] +== Analyzer Usage + +=== Fluent DSL Example + +[source,csharp] +---- +FluentExample +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +InitializerExample +---- + +[source,javascript] +.Example json output +---- +{ + "analysis": { + "analyzer": { + "myCustom": { + "type": "custom", + "tokenizer": "ng", + "filter": [ + "myAscii", + "kstem" + ], + "char_filter": [ + "stripMe", + "patterned" + ] + }, + "myKeyword": { + "type": "keyword" + }, + "myPattern": { + "type": "pattern", + "pattern": "\\w" + }, + "mySimple": { + "type": "simple" + }, + "myLanguage": { + "type": "dutch" + }, + "mySnow": { + "type": "snowball", + "language": "Dutch" + }, + "myStandard": { + "type": "standard", + "max_token_length": 2 + }, + "myStop": { + "type": "stop", + "stopwords_path": "analysis/stopwords.txt" + }, + "myWhiteSpace": { + "type": "whitespace" + }, + "myWhiteSpace2": { + "type": "whitespace" + } + } + } +} +---- + diff --git 
a/docs/asciidoc/analysis/char-filters/char-filter-usage.asciidoc b/docs/asciidoc/analysis/char-filters/char-filter-usage.asciidoc new file mode 100644 index 00000000000..430437db32b --- /dev/null +++ b/docs/asciidoc/analysis/char-filters/char-filter-usage.asciidoc @@ -0,0 +1,48 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[char-filter-usage]] +== Char Filter Usage + +=== Fluent DSL Example + +[source,csharp] +---- +FluentExample +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +InitializerExample +---- + +[source,javascript] +.Example json output +---- +{ + "analysis": { + "char_filter": { + "stripMe": { + "type": "html_strip" + }, + "patterned": { + "pattern": "x", + "replacement": "y", + "type": "pattern_replace" + }, + "mapped": { + "mappings": [ + "a=>b" + ], + "type": "mapping" + } + } + } +} +---- + diff --git a/docs/asciidoc/analysis/token-filters/token-filter-usage.asciidoc b/docs/asciidoc/analysis/token-filters/token-filter-usage.asciidoc new file mode 100644 index 00000000000..9b8870f4a3b --- /dev/null +++ b/docs/asciidoc/analysis/token-filters/token-filter-usage.asciidoc @@ -0,0 +1,240 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[token-filter-usage]] +== Token Filter Usage + +=== Fluent DSL Example + +[source,csharp] +---- +FluentExample +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +InitializerExample +---- + +[source,javascript] +.Example json output +---- +{ + "analysis": { + "filter": { + "myAscii": { + "type": "asciifolding", + "preserveOriginal": true + }, + "myCommonGrams": { + "type": "common_grams", + "common_words": [ + "x", + "y", + "z" + ], + "ignore_case": true, + "query_mode": true + }, + "mydp": { + "type": 
"delimited_payload_filter", + "delimiter": "-", + "encoding": "identity" + }, + "dcc": { + "type": "dictionary_decompounder", + "word_list": [ + "x", + "y", + "z" + ], + "min_word_size": 2, + "min_subword_size": 2, + "max_subword_size": 2, + "only_longest_match": true + }, + "etf": { + "type": "edge_ngram", + "min_gram": 1, + "max_gram": 2 + }, + "elision": { + "type": "elision", + "articles": [ + "a", + "b", + "c" + ] + }, + "hunspell": { + "type": "hunspell", + "ignore_case": true, + "locale": "en_US", + "dictionary": "path_to_dict", + "dedup": true, + "longest_only": true + }, + "hypdecomp": { + "type": "hyphenation_decompounder", + "word_list": [ + "x", + "y", + "z" + ], + "min_word_size": 2, + "min_subword_size": 2, + "max_subword_size": 2, + "only_longest_match": true, + "hyphenation_patterns_path": "analysis/fop.xml" + }, + "keeptypes": { + "type": "keep_types", + "types": [ + "", + "" + ] + }, + "keepwords": { + "type": "keep", + "keep_words": [ + "a", + "b", + "c" + ], + "keep_words_case": true + }, + "marker": { + "type": "keyword_marker", + "keywords": [ + "a", + "b" + ], + "ignore_case": true + }, + "kstem": { + "type": "kstem" + }, + "length": { + "type": "length", + "min": 10, + "max": 200 + }, + "limit": { + "type": "limit", + "max_token_count": 12, + "consume_all_tokens": true + }, + "lc": { + "type": "lowercase" + }, + "ngram": { + "type": "ngram", + "min_gram": 3, + "max_gram": 30 + }, + "pc": { + "type": "pattern_capture", + "patterns": [ + "\\d", + "\\w" + ], + "preserve_original": true + }, + "pr": { + "type": "pattern_replace", + "pattern": "(\\d|\\w)", + "replacement": "replacement" + }, + "porter": { + "type": "porter_stem" + }, + "rev": { + "type": "reverse" + }, + "shing": { + "type": "shingle", + "min_shingle_size": 8, + "max_shingle_size": 12, + "output_unigrams": true, + "output_unigrams_if_no_shingles": true, + "token_separator": "|", + "filler_token": "x" + }, + "snow": { + "type": "snowball", + "language": "Dutch" + }, + "standard": 
{ + "type": "standard" + }, + "stem": { + "type": "stemmer", + "language": "arabic" + }, + "stemo": { + "type": "stemmer_override", + "rules_path": "analysis/custom_stems.txt" + }, + "stop": { + "type": "stop", + "stopwords": [ + "x", + "y", + "z" + ], + "ignore_case": true, + "remove_trailing": true + }, + "syn": { + "type": "synonym", + "synonyms_path": "analysis/stopwords.txt", + "format": "wordnet", + "synonyms": [ + "x=>y", + "z=>s" + ], + "ignore_case": true, + "expand": true, + "tokenizer": "whitespace" + }, + "trimmer": { + "type": "trim" + }, + "truncer": { + "type": "truncate", + "length": 100 + }, + "uq": { + "type": "unique", + "only_on_same_position": true + }, + "upper": { + "type": "uppercase" + }, + "wd": { + "type": "word_delimiter", + "generate_word_parts": true, + "generate_number_parts": true, + "catenate_words": true, + "catenate_numbers": true, + "catenate_all": true, + "split_on_case_change": true, + "preserve_original": true, + "split_on_numerics": true, + "stem_english_possessive": true, + "protected_words": [ + "x", + "y", + "z" + ] + } + } + } +} +---- + diff --git a/docs/asciidoc/analysis/tokenizers/tokenizer-usage.asciidoc b/docs/asciidoc/analysis/tokenizers/tokenizer-usage.asciidoc new file mode 100644 index 00000000000..61048a3a42e --- /dev/null +++ b/docs/asciidoc/analysis/tokenizers/tokenizer-usage.asciidoc @@ -0,0 +1,76 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[tokenizer-usage]] +== Tokenizer Usage + +=== Fluent DSL Example + +[source,csharp] +---- +FluentExample +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +InitializerExample +---- + +[source,javascript] +.Example json output +---- +{ + "analysis": { + "tokenizer": { + "endgen": { + "min_gram": 1, + "max_gram": 2, + "token_chars": [ + "digit", + "letter" + ], + "type": "edge_ngram" + }, + "ng": { + "min_gram": 1, 
+ "max_gram": 2, + "token_chars": [ + "digit", + "letter" + ], + "type": "ngram" + }, + "path": { + "delimiter": "|", + "replacement": "-", + "buffer_size": 2048, + "reverse": true, + "skip": 1, + "type": "path_hierarchy" + }, + "pattern": { + "pattern": "\\W+", + "flags": "CASE_INSENSITIVE", + "group": 1, + "type": "pattern" + }, + "standard": { + "type": "standard" + }, + "uax": { + "max_token_length": 12, + "type": "uax_url_email" + }, + "whitespace": { + "type": "whitespace" + } + } + } +} +---- + diff --git a/docs/asciidoc/ClientConcepts/LowLevel/class.png b/docs/asciidoc/class.png similarity index 100% rename from docs/asciidoc/ClientConcepts/LowLevel/class.png rename to docs/asciidoc/class.png diff --git a/docs/asciidoc/client-concepts.asciidoc b/docs/asciidoc/client-concepts.asciidoc new file mode 100644 index 00000000000..9c20575e2e1 --- /dev/null +++ b/docs/asciidoc/client-concepts.asciidoc @@ -0,0 +1,4 @@ +include::low-level.asciidoc[] + +include::high-level.asciidoc[] + diff --git a/docs/asciidoc/client-concepts/connection-pooling/building-blocks/connection-pooling.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/connection-pooling.asciidoc new file mode 100644 index 00000000000..f9d4f35e1a0 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/connection-pooling.asciidoc @@ -0,0 +1,239 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[connection-pooling]] +== Connection Pooling + +Connection pooling is the internal mechanism that takes care of registering what nodes there are in the cluster and which +NEST can use to issue client calls on. 
There are four types of connection pool + +* <> + +* <> + +* <> + +* <> + +[[single-node-connection-pool]] +=== SingleNodeConnectionPool + +The simplest of all connection pools, this takes a single `Uri` and uses that to connect to Elasticsearch for all the calls +It doesn't opt in to sniffing and pinging behavior, and will never mark nodes dead or alive. The one `Uri` it holds is always +ready to go. + +[source,csharp] +---- +var uri = new Uri("http://localhost:9201"); +var pool = new SingleNodeConnectionPool(uri); +pool.Nodes.Should().HaveCount(1); +var node = pool.Nodes.First(); +node.Uri.Port.Should().Be(9201); +---- + +This type of pool is hardwired to opt out of reseeding (and hence sniffing) + +[source,csharp] +---- +pool.SupportsReseeding.Should().BeFalse(); +---- + +and pinging + +[source,csharp] +---- +pool.SupportsPinging.Should().BeFalse(); +---- + +When you use the low ceremony `ElasticClient` constructor that takes a single `Uri`, +We default to using `SingleNodeConnectionPool` + +[source,csharp] +---- +var client = new ElasticClient(uri); + +client.ConnectionSettings.ConnectionPool.Should().BeOfType(); +---- + +However we urge that you always pass your connection settings explicitly + +[source,csharp] +---- +client = new ElasticClient(new ConnectionSettings(uri)); + +client.ConnectionSettings.ConnectionPool.Should().BeOfType(); +---- + +or even better pass the connection pool explicitly + +[source,csharp] +---- +client = new ElasticClient(new ConnectionSettings(pool)); + +client.ConnectionSettings.ConnectionPool.Should().BeOfType(); +---- + +[[static-connection-pool]] +=== StaticConnectionPool + +The static connection pool is great if you have a known small sized cluster and do no want to enable +sniffing to find out the cluster topology. 
+ +[source,csharp] +---- +var uris = Enumerable.Range(9200, 5).Select(p => new Uri("http://localhost:" + p)); +---- + +a connection pool can be seeded using an enumerable of `Uri`s + +[source,csharp] +---- +var pool = new StaticConnectionPool(uris); +---- + +Or using an enumerable of `Node`s + +[source,csharp] +---- +var nodes = uris.Select(u => new Node(u)); + +pool = new StaticConnectionPool(nodes); +---- + +This type of pool is hardwired to opt out of reseeding (and hence sniffing) + +[source,csharp] +---- +pool.SupportsReseeding.Should().BeFalse(); +---- + +but supports pinging when enabled + +[source,csharp] +---- +pool.SupportsPinging.Should().BeTrue(); +---- + +To create a client using this static connection pool, pass +the connection pool to the `ConnectionSettings` you pass to `ElasticClient` + +[source,csharp] +---- +var client = new ElasticClient(new ConnectionSettings(pool)); + +client.ConnectionSettings.ConnectionPool.Should().BeOfType(); +---- + +[[sniffing-connection-pool]] +=== SniffingConnectionPool + +A subclass of `StaticConnectionPool` that allows itself to be reseeded at run time. +It comes with a very minor overhead of a `ReaderWriterLockSlim` to ensure thread safety. + +[source,csharp] +---- +var uris = Enumerable.Range(9200, 5).Select(p => new Uri("http://localhost:" + p)); +---- + +a connection pool can be seeded using an enumerable of `Uri` + +[source,csharp] +---- +var pool = new SniffingConnectionPool(uris); +---- + +Or using an enumerable of `Node`s. +A major benefit here is you can include known node roles when seeding and +NEST can use this information to favour sniffing on master eligible nodes first +and take master only nodes out of rotation for issuing client calls on. 
+ +[source,csharp] +---- +var nodes = uris.Select(u=>new Node(u)); + +pool = new SniffingConnectionPool(nodes); +---- + +This type of pool is hardwired to opt in to reseeding (and hence sniffing) + +[source,csharp] +---- +pool.SupportsReseeding.Should().BeTrue(); +---- + +and pinging + +[source,csharp] +---- +pool.SupportsPinging.Should().BeTrue(); +---- + +To create a client using the sniffing connection pool pass +the connection pool to the `ConnectionSettings` you pass to `ElasticClient` + +[source,csharp] +---- +var client = new ElasticClient(new ConnectionSettings(pool)); + +client.ConnectionSettings.ConnectionPool.Should().BeOfType(); +---- + +[[sticky-connection-pool]] +=== StickyConnectionPool + +A type of `IConnectionPool` that returns the first live node such that it is sticky between +requests. +It uses https://msdn.microsoft.com/en-us/library/system.threading.interlocked(v=vs.110).aspx[`System.Threading.Interlocked`] +to keep an _indexer_ to the last live node in a thread safe manner. + +[source,csharp] +---- +var uris = Enumerable.Range(9200, 5).Select(p => new Uri("http://localhost:" + p)); +---- + +a connection pool can be seeded using an enumerable of `Uri` + +[source,csharp] +---- +var pool = new StickyConnectionPool(uris); +---- + +Or using an enumerable of `Node`. +A major benefit here is you can include known node roles when seeding and +NEST can use this information to favour sniffing on master eligible nodes first +and take master only nodes out of rotation for issuing client calls on. 
+ +[source,csharp] +---- +var nodes = uris.Select(u=>new Node(u)); + +pool = new StickyConnectionPool(nodes); +---- + +This type of pool is hardwired to opt out of reseeding (and hence sniffing) + +[source,csharp] +---- +pool.SupportsReseeding.Should().BeFalse(); +---- + +but does support pinging + +[source,csharp] +---- +pool.SupportsPinging.Should().BeTrue(); +---- + +To create a client using the sticky connection pool pass +the connection pool to the `ConnectionSettings` you pass to `ElasticClient` + +[source,csharp] +---- +var client = new ElasticClient(new ConnectionSettings(pool)); + +client.ConnectionSettings.ConnectionPool.Should().BeOfType(); +---- + diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/DateTimeProviders.Doc.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/date-time-providers.asciidoc similarity index 50% rename from docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/DateTimeProviders.Doc.asciidoc rename to docs/asciidoc/client-concepts/connection-pooling/building-blocks/date-time-providers.asciidoc index 6865704b143..f647e7d8e6f 100644 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/DateTimeProviders.Doc.asciidoc +++ b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/date-time-providers.asciidoc @@ -1,56 +1,62 @@ -= Date time providers - -Not typically something you'll have to pass to the client but all calls to `System.DateTime.UtcNow` -in the client have been abstracted by `IDateTimeProvider`. This allows us to unit test timeouts and clusterfailover -in run time not being bound to wall clock time. 
- - -[source, csharp] ----- -var dateTimeProvider = DateTimeProvider.Default; ----- -dates are always returned in UTC - -[source, csharp] ----- -dateTimeProvider.Now().Should().BeCloseTo(DateTime.UtcNow); ----- - -Another responsibility of this interface is to calculate the time a node has to be taken out of rotation -based on the number of attempts to revive it. For very advanced use cases, this might be something of interest -to provide a custom implementation for. - - -[source, csharp] ----- -var dateTimeProvider = DateTimeProvider.Default; ----- - -The default timeout calculation is: `min(timeout * 2 ^ (attempts * 0.5 -1), maxTimeout)` -The default values for `timeout` and `maxTimeout` are - -[source, csharp] ----- -var timeout = TimeSpan.FromMinutes(1); ----- -[source, csharp] ----- -var maxTimeout = TimeSpan.FromMinutes(30); ----- -Plotting these defaults looks as followed: -[[timeout]] -.Default formula, x-axis time in minutes, y-axis number of attempts to revive -image::timeoutplot.png[dead timeout] -The goal here is that whenever a node is resurrected and is found to still be offline, we send it -_back to the doghouse_ for an ever increasingly long period, until we hit a bounded maximum. - -[source, csharp] ----- -var timeouts = Enumerable.Range(0, 30) - .Select(attempt => dateTimeProvider.DeadTime(attempt, timeout, maxTimeout)) - .ToList(); ----- -[source, csharp] ----- -increasedTimeout.Should().BeWithin(maxTimeout); ----- +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[date-time-providers]] +== Date time providers + +Not typically something you'll have to pass to the client but all calls to `System.DateTime.UtcNow` +in the client have been abstracted by `IDateTimeProvider`. 
This allows us to unit test timeouts and cluster failover +without being bound to wall clock time as calculated by using `System.DateTime.UtcNow` directly. + +[source,csharp] +---- +var dateTimeProvider = DateTimeProvider.Default; +---- + +dates are always returned in UTC + +[source,csharp] +---- +dateTimeProvider.Now().Should().BeCloseTo(DateTime.UtcNow); +---- + +Another responsibility of this interface is to calculate the time a node has to be taken out of rotation +based on the number of attempts to revive it. For very advanced use cases, this might be something of interest +to provide a custom implementation for. + +[source,csharp] +---- +var dateTimeProvider = DateTimeProvider.Default; +---- + +The default timeout calculation is: `min(timeout * 2 ^ (attempts * 0.5 -1), maxTimeout)`, where the +default values for `timeout` and `maxTimeout` are + +[source,csharp] +---- +var timeout = TimeSpan.FromMinutes(1); + +var maxTimeout = TimeSpan.FromMinutes(30); +---- + +Plotting these defaults looks as followed: + +[[timeout]] +.Default formula, x-axis time in minutes, y-axis number of attempts to revive +image::timeoutplot.png[dead timeout] + +The goal here is that whenever a node is resurrected and is found to still be offline, we send it _back to the doghouse_ for an ever increasingly long period, until we hit a bounded maximum. 
+ +[source,csharp] +---- +var timeouts = Enumerable.Range(0, 30) + .Select(attempt => dateTimeProvider.DeadTime(attempt, timeout, maxTimeout)) + .ToList(); + +foreach (var increasedTimeout in timeouts.Take(10)) + increasedTimeout.Should().BeWithin(maxTimeout); +---- + diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/KeepingTrackOfNodes.Doc.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/keeping-track-of-nodes.asciidoc similarity index 66% rename from docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/KeepingTrackOfNodes.Doc.asciidoc rename to docs/asciidoc/client-concepts/connection-pooling/building-blocks/keeping-track-of-nodes.asciidoc index ab9533441a7..1078b219a3a 100644 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/KeepingTrackOfNodes.Doc.asciidoc +++ b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/keeping-track-of-nodes.asciidoc @@ -1,106 +1,137 @@ -= Keeping track of nodes +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current +:github: https://github.com/elastic/elasticsearch-net +:nuget: https://www.nuget.org/packages -[source, csharp] +[[keeping-track-of-nodes]] +== Keeping track of nodes + +=== Creating a Node + +A `Node` can be instantiated by passing it a `Uri` + +[source,csharp] ---- var node = new Node(new Uri("http://localhost:9200")); + node.Uri.Should().NotBeNull(); node.Uri.Port.Should().Be(9200); ---- + By default master eligible and holds data is presumed to be true * -[source, csharp] +[source,csharp] ---- node.MasterEligible.Should().BeTrue(); ----- -[source, csharp] ----- + node.HoldsData.Should().BeTrue(); ---- + Is resurrected is true on first usage, hints to the transport that a ping might be useful -[source, csharp] +[source,csharp] ---- node.IsResurrected.Should().BeTrue(); ---- + When instantiating your connection pool you could switch these to false to initialize the client to a known cluster topology. 
+=== Building a Node path -passing a node with a path should be preserved. Sometimes an elasticsearch node lives behind a proxy +passing a node with a path should be preserved. +Sometimes an Elasticsearch node lives behind a proxy -[source, csharp] +[source,csharp] ---- var node = new Node(new Uri("http://test.example/elasticsearch")); ----- -[source, csharp] ----- + node.Uri.Port.Should().Be(80); + node.Uri.AbsolutePath.Should().Be("/elasticsearch/"); ---- -We force paths to end with a forward slash so that they can later be safely combined -[source, csharp] +*We force paths to end with a forward slash* so that they can later be safely combined + +[source,csharp] ---- var combinedPath = new Uri(node.Uri, "index/type/_search"); ----- -[source, csharp] ----- + combinedPath.AbsolutePath.Should().Be("/elasticsearch/index/type/_search"); ---- + which is exactly what the `CreatePath` method does on `Node` -[source, csharp] +[source,csharp] ---- combinedPath = node.CreatePath("index/type/_search"); + +combinedPath.AbsolutePath.Should().Be("/elasticsearch/index/type/_search"); ---- -[source, csharp] + +=== Marking Nodes + +[source,csharp] ---- -combinedPath.AbsolutePath.Should().Be("/elasticsearch/index/type/_search"); var node = new Node(new Uri("http://localhost:9200")); + node.FailedAttempts.Should().Be(0); + node.IsAlive.Should().BeTrue(); ---- -every time a node is marked dead the number of attempts should increase +every time a node is marked dead, the number of attempts should increase and the passed datetime should be exposed. 
- -[source, csharp] +[source,csharp] ---- var deadUntil = DateTime.Now.AddMinutes(1); + node.MarkDead(deadUntil); + node.FailedAttempts.Should().Be(i + 1); + node.IsAlive.Should().BeFalse(); + node.DeadUntil.Should().Be(deadUntil); ---- -however when marking a node alive deaduntil should be reset and attempts reset to 0 -[source, csharp] +however when marking a node alive, the `DeadUntil` property should be reset and `FailedAttempts` reset to 0 + +[source,csharp] ---- node.MarkAlive(); ----- -[source, csharp] ----- + node.FailedAttempts.Should().Be(0); + node.DeadUntil.Should().Be(default(DateTime)); + node.IsAlive.Should().BeTrue(); ---- -Nodes are considered equal if they have the same endpoint no matter what other metadata is associated -[source, csharp] +=== Node Equality + +Nodes are considered equal if they have the same endpoint, no matter what other metadata is associated + +[source,csharp] ---- var node = new Node(new Uri("http://localhost:9200")) { MasterEligible = false }; ----- -[source, csharp] ----- + var nodeAsMaster = new Node(new Uri("http://localhost:9200")) { MasterEligible = true }; + (node == nodeAsMaster).Should().BeTrue(); + (node != nodeAsMaster).Should().BeFalse(); + var uri = new Uri("http://localhost:9200"); + (node == uri).Should().BeTrue(); + var differentUri = new Uri("http://localhost:9201"); + (node != differentUri).Should().BeTrue(); + node.Should().Be(nodeAsMaster); ---- + diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/RequestPipelines.doc.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/request-pipelines.asciidoc similarity index 56% rename from docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/RequestPipelines.doc.asciidoc rename to docs/asciidoc/client-concepts/connection-pooling/building-blocks/request-pipelines.asciidoc index a8bcb9555d8..6477bb22d08 100644 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/RequestPipelines.doc.asciidoc +++ 
b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/request-pipelines.asciidoc @@ -1,169 +1,251 @@ -= Request pipeline -Every request is executed in the context of `RequestPipeline` when using the default `ITransport` implementation. +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current +:github: https://github.com/elastic/elasticsearch-net +:nuget: https://www.nuget.org/packages -[source, csharp] +[[request-pipeline]] +== Request Pipeline + +Every request is executed in the context of a `RequestPipeline` when using the +default <> implementation. + +[source,csharp] ---- var settings = TestClient.CreateSettings(); ---- -When calling Request(Async) on Transport the whole coordination of the request is deferred to a new instance in a `using` block. -[source, csharp] ----- -var pipeline = new RequestPipeline(settings, DateTimeProvider.Default, new MemoryStreamFactory(), new SearchRequestParameters()); ----- -[source, csharp] +When calling `Request()` or `RequestAsync()` on an `ITransport`, +the whole coordination of the request is deferred to a new instance in a `using` block. 
+ +[source,csharp] ---- +var pipeline = new RequestPipeline( + settings, + DateTimeProvider.Default, + new MemoryStreamFactory(), + new SearchRequestParameters()); + pipeline.GetType().Should().Implement(); ---- -However the transport does not instantiate RequestPipeline directly, it uses a pluggable `IRequestPipelineFactory` -[source, csharp] +An `ITransport` does not instantiate a `RequestPipeline` directly; it uses a pluggable `IRequestPipelineFactory` +to create it + +[source,csharp] ---- var requestPipelineFactory = new RequestPipelineFactory(); ----- -[source, csharp] ----- -var requestPipeline = requestPipelineFactory.Create(settings, DateTimeProvider.Default, new MemoryStreamFactory(), new SearchRequestParameters()); + +var requestPipeline = requestPipelineFactory.Create( + settings, + DateTimeProvider.Default, <1> + new MemoryStreamFactory(), + new SearchRequestParameters()); requestPipeline.Should().BeOfType(); requestPipeline.GetType().Should().Implement(); ---- -which can be passed to the transport when instantiating a client +<1> An <> + +You can pass your own `IRequestPipeline` implementation to the Transport when instantiating a client, +allowing you to have requests executed on your own custom request pipeline -[source, csharp] +[source,csharp] ---- -var transport = new Transport(settings, requestPipelineFactory, DateTimeProvider.Default, new MemoryStreamFactory()); +var transport = new Transport( + settings, + requestPipelineFactory, + DateTimeProvider.Default, + new MemoryStreamFactory()); ---- -this allows you to have requests executed on your own custom request pipeline -[source, csharp] +[source,csharp] ---- var pool = setupPool(new[] { TestClient.CreateNode(), TestClient.CreateNode(9201) }); + var settings = new ConnectionSettings(pool, TestClient.CreateConnection()); + settings = settingsSelector?.Invoke(settings) ?? 
settings; +---- + +=== Pipeline Behavior + +==== Sniffing on First usage + +[source,csharp] +---- var singleNodePipeline = CreatePipeline(uris => new SingleNodeConnectionPool(uris.First())); + var staticPipeline = CreatePipeline(uris => new StaticConnectionPool(uris)); + var sniffingPipeline = CreatePipeline(uris => new SniffingConnectionPool(uris)); ---- -Here we have setup three pipelines using three different connection pools, lets see how they behave -[source, csharp] +Here we have setup three pipelines using three different connection pools. Let's see how they behave +on first usage + +[source,csharp] ---- singleNodePipeline.FirstPoolUsageNeedsSniffing.Should().BeFalse(); ----- -[source, csharp] ----- + staticPipeline.FirstPoolUsageNeedsSniffing.Should().BeFalse(); + sniffingPipeline.FirstPoolUsageNeedsSniffing.Should().BeTrue(); ---- -Only the cluster that supports reseeding will opt in to FirstPoolUsageNeedsSniffing() -You can however disable this on ConnectionSettings -[source, csharp] +We can see that only the cluster that supports reseeding will opt in to `FirstPoolUsageNeedsSniffing()`; +You can however disable reseeding/sniffing on ConnectionSettings + +[source,csharp] ---- sniffingPipeline = CreatePipeline(uris => new SniffingConnectionPool(uris), s => s.SniffOnStartup(false)); + +sniffingPipeline.FirstPoolUsageNeedsSniffing.Should().BeFalse(); ---- -[source, csharp] + +==== Sniffing on Connection Failure + +[source,csharp] ---- -sniffingPipeline.FirstPoolUsageNeedsSniffing.Should().BeFalse(); var singleNodePipeline = CreatePipeline(uris => new SingleNodeConnectionPool(uris.First())); + var staticPipeline = CreatePipeline(uris => new StaticConnectionPool(uris)); + var sniffingPipeline = CreatePipeline(uris => new SniffingConnectionPool(uris)); + singleNodePipeline.SniffsOnConnectionFailure.Should().BeFalse(); + staticPipeline.SniffsOnConnectionFailure.Should().BeFalse(); + sniffingPipeline.SniffsOnConnectionFailure.Should().BeTrue(); ---- -Only the 
cluster that supports reseeding will opt in to SniffsOnConnectionFailure() + +Only the cluster that supports reseeding will opt in to SniffsOnConnectionFailure() You can however disable this on ConnectionSettings -[source, csharp] +[source,csharp] ---- sniffingPipeline = CreatePipeline(uris => new SniffingConnectionPool(uris), s => s.SniffOnConnectionFault(false)); + +sniffingPipeline.SniffsOnConnectionFailure.Should().BeFalse(); ---- -[source, csharp] + +==== Sniffing on Stale cluster + +[source,csharp] ---- -sniffingPipeline.SniffsOnConnectionFailure.Should().BeFalse(); var dateTime = new TestableDateTimeProvider(); -var singleNodePipeline = CreatePipeline(uris => new SingleNodeConnectionPool(uris.First(), dateTime), dateTimeProvider: dateTime); -var staticPipeline = CreatePipeline(uris => new StaticConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime); -var sniffingPipeline = CreatePipeline(uris => new SniffingConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime); + +var singleNodePipeline = CreatePipeline(uris => + new SingleNodeConnectionPool(uris.First(), dateTime), dateTimeProvider: dateTime); + +var staticPipeline = CreatePipeline(uris => + new StaticConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime); + +var sniffingPipeline = CreatePipeline(uris => + new SniffingConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime); + singleNodePipeline.SniffsOnStaleCluster.Should().BeFalse(); + staticPipeline.SniffsOnStaleCluster.Should().BeFalse(); + sniffingPipeline.SniffsOnStaleCluster.Should().BeTrue(); + singleNodePipeline.StaleClusterState.Should().BeFalse(); + staticPipeline.StaleClusterState.Should().BeFalse(); + sniffingPipeline.StaleClusterState.Should().BeFalse(); ---- + go one hour into the future -[source, csharp] +[source,csharp] ---- dateTime.ChangeTime(d => d.Add(TimeSpan.FromHours(2))); ---- + connection pools that do not support reseeding never go stale 
-[source, csharp] +[source,csharp] ---- singleNodePipeline.StaleClusterState.Should().BeFalse(); ----- -[source, csharp] ----- + staticPipeline.StaleClusterState.Should().BeFalse(); ---- + the sniffing connection pool supports reseeding so the pipeline will signal the state is out of date -[source, csharp] +[source,csharp] ---- sniffingPipeline.StaleClusterState.Should().BeTrue(); ---- -A request pipeline also checks whether the overall time across multiple retries exceeds the request timeout -See the maxretry documentation for more details, here we assert that our request pipeline exposes this propertly +=== Retrying requests + +A request pipeline also checks whether the overall time across multiple retries exceeds the request timeout. +See the <> for more details, here we assert that our request pipeline exposes this propertly -[source, csharp] +[source,csharp] ---- var dateTime = new TestableDateTimeProvider(); -var singleNodePipeline = CreatePipeline(uris => new SingleNodeConnectionPool(uris.First(), dateTime), dateTimeProvider: dateTime); -var staticPipeline = CreatePipeline(uris => new StaticConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime); -var sniffingPipeline = CreatePipeline(uris => new SniffingConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime); + +var singleNodePipeline = CreatePipeline(uris => + new SingleNodeConnectionPool(uris.First(), dateTime), dateTimeProvider: dateTime); + +var staticPipeline = CreatePipeline(uris => + new StaticConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime); + +var sniffingPipeline = CreatePipeline(uris => + new SniffingConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime); + singleNodePipeline.IsTakingTooLong.Should().BeFalse(); + staticPipeline.IsTakingTooLong.Should().BeFalse(); + sniffingPipeline.IsTakingTooLong.Should().BeFalse(); ---- + go one hour into the future -[source, csharp] +[source,csharp] ---- 
dateTime.ChangeTime(d => d.Add(TimeSpan.FromHours(2))); ---- + connection pools that do not support reseeding never go stale -[source, csharp] +[source,csharp] ---- singleNodePipeline.IsTakingTooLong.Should().BeTrue(); ----- -[source, csharp] ----- + staticPipeline.IsTakingTooLong.Should().BeTrue(); ---- + the sniffing connection pool supports reseeding so the pipeline will signal the state is out of date -[source, csharp] +[source,csharp] ---- sniffingPipeline.IsTakingTooLong.Should().BeTrue(); ---- + request pipeline exposes the DateTime it started, here we assert it started 2 hours in the past -[source, csharp] +[source,csharp] ---- (dateTime.Now() - singleNodePipeline.StartedOn).Should().BePositive().And.BeCloseTo(TimeSpan.FromHours(2)); ----- -[source, csharp] ----- + (dateTime.Now() - staticPipeline.StartedOn).Should().BePositive().And.BeCloseTo(TimeSpan.FromHours(2)); + (dateTime.Now() - sniffingPipeline.StartedOn).Should().BePositive().And.BeCloseTo(TimeSpan.FromHours(2)); +---- + +[source,csharp] +---- var dateTime = new TestableDateTimeProvider(); -var sniffingPipeline = CreatePipeline(uris => new SniffingConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime) as RequestPipeline; + +var sniffingPipeline = CreatePipeline(uris => + new SniffingConnectionPool(uris, dateTimeProvider: dateTime), dateTimeProvider: dateTime) as RequestPipeline; + sniffingPipeline.SniffPath.Should().Be("_nodes/_all/settings?flat_settings&timeout=2s"); ---- + diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/timeoutplot.png b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/timeoutplot.png similarity index 100% rename from docs/asciidoc/ClientConcepts/ConnectionPooling/BuildingBlocks/timeoutplot.png rename to docs/asciidoc/client-concepts/connection-pooling/building-blocks/timeoutplot.png diff --git a/docs/asciidoc/client-concepts/connection-pooling/building-blocks/transports.asciidoc 
b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/transports.asciidoc new file mode 100644 index 00000000000..46e4db85a57 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/building-blocks/transports.asciidoc @@ -0,0 +1,52 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[transports]] +== Transports + +The `ITransport` interface can be seen as the motor block of the client. Its interface is deceptively simple and +it's ultimately responsible for translating a client call to a response. + +If for some reason you do not agree with the way we wrote the internals of the client, +by implementing a custom `ITransport`, you can circumvent all of it and introduce your own. + +Transport is generically typed to a type that implements `IConnectionConfigurationValues`. +This is the minimum `ITransport` needs to report back for the client to function. + +In the low level client, `ElasticLowLevelClient`, a `Transport` is instantiated like this: + +[source,csharp] +---- +var lowLevelTransport = new Transport(new ConnectionConfiguration()); +---- + +and in the high level client, `ElasticClient`, like this: + +[source,csharp] +---- +var highlevelTransport = new Transport(new ConnectionSettings()); + +var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200")); +var inMemoryTransport = new Transport(new ConnectionSettings(connectionPool, new InMemoryConnection())); +---- + +The only two methods on `ITransport` are `Request()` and `RequestAsync()`; the default `ITransport` implementation is responsible for introducing +many of the building blocks in the client. If you feel that the defaults do not work for you then you can swap them out for your own +custom `ITransport` implementation and if you do, {github}/issues[please let us know] as we'd love to learn why you've gone down this route! 
+ +[source,csharp] +---- +var response = inMemoryTransport.Request>( + HttpMethod.GET, + "/_search", + new { query = new { match_all = new { } } }); + +response = await inMemoryTransport.RequestAsync>( + HttpMethod.GET, + "/_search", + new { query = new { match_all = new { } } }); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/exceptions/unexpected-exceptions.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/exceptions/unexpected-exceptions.asciidoc new file mode 100644 index 00000000000..a49b874df69 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/exceptions/unexpected-exceptions.asciidoc @@ -0,0 +1,125 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[unexpected-exceptions]] +== Unexpected exceptions + +When a client call throws an exception that the IConnection can not handle, this exception will bubble +out the client as an UnexpectedElasticsearchClientException, regardless of whether the client is configured to throw or not. +An IConnection is in charge of knowing what exceptions it can recover from or not. The default IConnection that is based on WebRequest can and +will recover from WebExceptions but others will be grounds for immediately exiting the pipeline. 
+ +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.SucceedAlways()) + .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!"))) + .StaticConnectionPool() + .Settings(s => s.DisablePing()) +); +audit = await audit.TraceCall( + new ClientCall { + { AuditEvent.HealthyResponse, 9200 }, + } +); +audit = await audit.TraceUnexpectedException( + new ClientCall { + { AuditEvent.BadResponse, 9201 }, + }, + (e) => + { + e.FailureReason.Should().Be(PipelineFailure.Unexpected); + e.InnerException.Should().NotBeNull(); + e.InnerException.Message.Should().Be("boom!"); + } +); +e.FailureReason.Should().Be(PipelineFailure.Unexpected); +e.InnerException.Should().NotBeNull(); +e.InnerException.Message.Should().Be("boom!"); +---- + +Sometimes an unexpected exception happens further down in the pipeline, this is why we +wrap them inside an UnexpectedElasticsearchClientException so that information about where +in the pipeline the unexpected exception is not lost, here a call to 9200 fails using a WebException. +It then falls over to 9201 which throws a hard exception from within IConnection. We assert that we +can still see the audit trail for the whole coordinated request. 
+ +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) +#if DOTNETCORE + .ClientCalls(r => r.OnPort(9200).FailAlways(new System.Net.Http.HttpRequestException("recover"))) +#else + .ClientCalls(r => r.OnPort(9200).FailAlways(new WebException("recover"))) +#endif + .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!"))) + .StaticConnectionPool() + .Settings(s => s.DisablePing()) +); + +audit = await audit.TraceUnexpectedException( + new ClientCall { + { AuditEvent.BadResponse, 9200 }, + { AuditEvent.BadResponse, 9201 }, + }, + (e) => + { + e.FailureReason.Should().Be(PipelineFailure.Unexpected); + e.InnerException.Should().NotBeNull(); + e.InnerException.Message.Should().Be("boom!"); + } +); + +e.FailureReason.Should().Be(PipelineFailure.Unexpected); + +e.InnerException.Should().NotBeNull(); + +e.InnerException.Message.Should().Be("boom!"); +---- + +An unexpected hard exception on ping and sniff is something we *do* try to revover from and failover. +Here pinging nodes on first use is enabled and 9200 throws on ping, we still fallover to 9201's ping succeeds. 
+However the client call on 9201 throws a hard exception we can not recover from + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Ping(r => r.OnPort(9200).FailAlways(new Exception("ping exception"))) + .Ping(r => r.OnPort(9201).SucceedAlways()) + .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!"))) + .StaticConnectionPool() + .AllDefaults() +); +---- + +[source,csharp] +---- +audit = await audit.TraceUnexpectedException( + new ClientCall { + { AuditEvent.PingFailure, 9200 }, + { AuditEvent.PingSuccess, 9201 }, + { AuditEvent.BadResponse, 9201 }, + }, + (e) => + { + e.FailureReason.Should().Be(PipelineFailure.Unexpected); +e.InnerException.Should().NotBeNull(); + e.InnerException.Message.Should().Be("boom!"); +e.SeenExceptions.Should().NotBeEmpty(); + var pipelineException = e.SeenExceptions.First(); + pipelineException.FailureReason.Should().Be(PipelineFailure.PingFailure); + pipelineException.InnerException.Message.Should().Be("ping exception"); +var pingException = e.AuditTrail.First(a => a.Event == AuditEvent.PingFailure).Exception; + pingException.Should().NotBeNull(); + pingException.Message.Should().Be("ping exception"); + + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/exceptions/unrecoverable-exceptions.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/exceptions/unrecoverable-exceptions.asciidoc new file mode 100644 index 00000000000..3a1f5c3a497 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/exceptions/unrecoverable-exceptions.asciidoc @@ -0,0 +1,188 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[unrecoverable-exceptions]] +== Unrecoverable exceptions + +Unrecoverable exceptions are _excepted_ exceptions that are grounds to exit the client pipeline immediately. 
+By default, the client won't throw on any `ElasticsearchClientException` but instead return an invalid response which +can be detected by checking `.IsValid` on the response +You can configure the client to throw using `ThrowExceptions()` on `ConnectionSettings`. The following test +both a client that throws and one that returns an invalid response with an `.OriginalException` exposed + +The following are recoverable exceptions + +[source,csharp] +---- +var recoverablExceptions = new[] +{ + new PipelineException(PipelineFailure.BadResponse), + new PipelineException(PipelineFailure.PingFailure), +}; + +recoverablExceptions.Should().OnlyContain(e => e.Recoverable); +---- + +and the unrecoverable exceptions + +[source,csharp] +---- +var unrecoverableExceptions = new[] +{ + new PipelineException(PipelineFailure.CouldNotStartSniffOnStartup), + new PipelineException(PipelineFailure.SniffFailure), + new PipelineException(PipelineFailure.Unexpected), + new PipelineException(PipelineFailure.BadAuthentication), + new PipelineException(PipelineFailure.MaxRetriesReached), + new PipelineException(PipelineFailure.MaxTimeoutReached) +}; + +unrecoverableExceptions.Should().OnlyContain(e => !e.Recoverable); +---- + +As an example, let's set up a 10 node cluster that will always succeed when pinged but + will fail with a 401 response when making client calls + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Ping(r => r.SucceedAlways()) + .ClientCalls(r => r.FailAlways(401)) + .StaticConnectionPool() + .AllDefaults() +); +---- + +Here we make a client call and determine that the first audit event was a successful ping, +followed by a bad response as a result of a bad authentication response + +[source,csharp] +---- +audit = await audit.TraceElasticsearchException( + new ClientCall { + { AuditEvent.PingSuccess, 9200 }, + { AuditEvent.BadResponse, 9200 }, + }, + (e) => + { + e.FailureReason.Should().Be(PipelineFailure.BadAuthentication); + } +); 
+---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Ping(r => r.SucceedAlways()) + .ClientCalls(r => r.FailAlways(401).ReturnResponse(ResponseHtml)) + .StaticConnectionPool() + .AllDefaults() +); + +audit = await audit.TraceElasticsearchException( + new ClientCall { + { AuditEvent.PingSuccess, 9200 }, + { AuditEvent.BadResponse, 9200 }, + }, + (e) => + { + e.FailureReason.Should().Be(PipelineFailure.BadAuthentication); + e.Response.HttpStatusCode.Should().Be(401); + e.Response.ResponseBodyInBytes.Should().BeNull(); + } +); + +e.FailureReason.Should().Be(PipelineFailure.BadAuthentication); + +e.Response.HttpStatusCode.Should().Be(401); + +e.Response.ResponseBodyInBytes.Should().BeNull(); +---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Ping(r => r.SucceedAlways()) + .ClientCalls(r => r.FailAlways(401).ReturnResponse(ResponseHtml)) + .StaticConnectionPool() + .Settings(s=>s.DisableDirectStreaming()) +); + +audit = await audit.TraceElasticsearchException( + new ClientCall { + { AuditEvent.PingSuccess, 9200 }, + { AuditEvent.BadResponse, 9200 }, + }, + (e) => + { + e.FailureReason.Should().Be(PipelineFailure.BadAuthentication); + e.Response.HttpStatusCode.Should().Be(401); + e.Response.ResponseBodyInBytes.Should().NotBeNull(); + var responseString = Encoding.UTF8.GetString(e.Response.ResponseBodyInBytes); + responseString.Should().Contain("nginx/"); + e.DebugInformation.Should().Contain("nginx/"); + } +); + +e.FailureReason.Should().Be(PipelineFailure.BadAuthentication); + +e.Response.HttpStatusCode.Should().Be(401); + +e.Response.ResponseBodyInBytes.Should().NotBeNull(); + +var responseString = Encoding.UTF8.GetString(e.Response.ResponseBodyInBytes); + +responseString.Should().Contain("nginx/"); + +e.DebugInformation.Should().Contain("nginx/"); +---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Ping(r => r.SucceedAlways()) + 
.ClientCalls(r => r.FailAlways(401).ReturnResponse(ResponseHtml)) + .StaticConnectionPool() + .Settings(s=>s.DisableDirectStreaming().DefaultIndex("default-index")) + .ClientProxiesTo( + (c, r) => c.Get("1", s=>s.RequestConfiguration(r)), + async (c, r) => await c.GetAsync("1", s=>s.RequestConfiguration(r)) as IResponse + ) +); + +audit = await audit.TraceElasticsearchException( + new ClientCall { + { AuditEvent.PingSuccess, 9200 }, + { AuditEvent.BadResponse, 9200 }, + }, + (e) => + { + e.FailureReason.Should().Be(PipelineFailure.BadAuthentication); + e.Response.HttpStatusCode.Should().Be(401); + e.Response.ResponseBodyInBytes.Should().NotBeNull(); + var responseString = Encoding.UTF8.GetString(e.Response.ResponseBodyInBytes); + responseString.Should().Contain("nginx/"); + e.DebugInformation.Should().Contain("nginx/"); + } +); + +e.FailureReason.Should().Be(PipelineFailure.BadAuthentication); + +e.Response.HttpStatusCode.Should().Be(401); + +e.Response.ResponseBodyInBytes.Should().NotBeNull(); + +var responseString = Encoding.UTF8.GetString(e.Response.ResponseBodyInBytes); + +responseString.Should().Contain("nginx/"); + +e.DebugInformation.Should().Contain("nginx/"); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/failover/falling-over.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/failover/falling-over.asciidoc new file mode 100644 index 00000000000..1034edd1355 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/failover/falling-over.asciidoc @@ -0,0 +1,97 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[falling-over]] +== Fail over + +When using connection pooling and the pool has sufficient nodes a request will be retried if +the call to a node throws an exception or returns a 502 or 503 + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + 
.ClientCalls(r => r.FailAlways()) + .ClientCalls(r => r.OnPort(9201).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing()) +); +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 }, + { HealthyResponse, 9201 }, + } +); +---- + +[[bad-gateway]] +=== 502 Bad Gateway + +Will be treated as an error that requires retrying + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways(502)) + .ClientCalls(r => r.OnPort(9201).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing()) +); + +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 }, + { HealthyResponse, 9201 }, + } +); +---- + +[[service-unavailable]] +=== 503 Service Unavailable + +Will be treated as an error that requires retrying + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways(503)) + .ClientCalls(r => r.OnPort(9201).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing()) +); + +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 }, + { HealthyResponse, 9201 }, + } +); +---- + +If a call returns a valid http status code other than 502 or 503, the request won't be retried. + +IMPORTANT: Different requests may have different status codes that are deemed valid. 
For example, +a *404 Not Found* response is a valid status code for an index exists request + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways(418)) + .ClientCalls(r => r.OnPort(9201).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing()) +); + +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 }, + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/max-retries/respects-max-retry.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/max-retries/respects-max-retry.asciidoc new file mode 100644 index 00000000000..a932cc42591 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/max-retries/respects-max-retry.asciidoc @@ -0,0 +1,158 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[max-retries]] +== Max Retries + +By default, NEST will retry as many times as there are nodes in the cluster that the client knows about. +Retries still respects the request timeout however, +meaning if you have a 100 node cluster and a request timeout of 20 seconds, +the client will retry as many times as it before giving up at the request timeout of 20 seconds. + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways()) + .ClientCalls(r => r.OnPort(9209).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing()) +); +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 }, + { BadResponse, 9201 }, + { BadResponse, 9202 }, + { BadResponse, 9203 }, + { BadResponse, 9204 }, + { BadResponse, 9205 }, + { BadResponse, 9206 }, + { BadResponse, 9207 }, + { BadResponse, 9208 }, + { HealthyResponse, 9209 } + } +); +---- + +When you have a 100 node cluster, you might want to ensure a fixed number of retries. 
+ +IMPORTANT: the actual number of requests is **initial attempt + set number of retries** + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways()) + .ClientCalls(r => r.OnPort(9209).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing().MaximumRetries(3)) +); + +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 }, + { BadResponse, 9201 }, + { BadResponse, 9202 }, + { BadResponse, 9203 }, + { MaxRetriesReached } + } +); +---- + +In our previous test we simulated very fast failures, but in the real world a call might take upwards of a second. +In this next example, we simulate a particular heavy search that takes 10 seconds to fail, and set a request timeout of 20 seconds. +We see that the request is tried twice and gives up before a third call is attempted, since the call takes 10 seconds and thus can be +tried twice (initial call and one retry) before the request timeout. + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(10))) + .ClientCalls(r => r.OnPort(9209).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(20))) +); + +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 }, + { BadResponse, 9201 }, + { MaxTimeoutReached } + } +); +---- + +If you set a smaller request timeout you might not want it to also affect the retry timeout. +In cases like this, you can configure the `MaxRetryTimeout` separately. +Here we simulate calls taking 3 seconds, a request timeout of 2 seconds and a max retry timeout of 10 seconds. 
+We should see 5 attempts to perform this query, testing that our request timeout cuts the query off short and that +our max retry timeout of 10 seconds wins over the configured request timeout + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3))) + .ClientCalls(r => r.OnPort(9209).FailAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(2)).MaxRetryTimeout(TimeSpan.FromSeconds(10))) +); + +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 }, + { BadResponse, 9201 }, + { BadResponse, 9202 }, + { BadResponse, 9203 }, + { BadResponse, 9204 }, + { MaxTimeoutReached } + } +); +---- + +If your retry policy expands beyond the number of available nodes, the client **won't** retry the same node twice + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(2) + .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3))) + .ClientCalls(r => r.OnPort(9209).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(2)).MaxRetryTimeout(TimeSpan.FromSeconds(10))) +); + +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 }, + { BadResponse, 9201 }, + { MaxRetriesReached } + } +); +---- + +This makes setting any retry setting on a single node connection pool a no-op by design! 
+Connection pooling and failover is all about trying to fail sanely whilst still utilizing the available resources and +not giving up on the fail fast principle; **It is NOT a mechanism for forcing requests to succeed.** + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3))) + .ClientCalls(r => r.OnPort(9209).SucceedAlways()) + .SingleNodeConnection() + .Settings(s => s.DisablePing().MaximumRetries(10)) +); + +audit = await audit.TraceCall( + new ClientCall { + { BadResponse, 9200 } + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/pinging/first-usage.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/pinging/first-usage.asciidoc new file mode 100644 index 00000000000..01ece2c32b3 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/pinging/first-usage.asciidoc @@ -0,0 +1,119 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[pinging-first-usage]] +== Pinging - First Usage + +Pinging is enabled by default for the <>, <> and <> connection pools. +This means that the first time a node is used or resurrected, a ping is issued a with a small (configurable) timeout, +allowing the client to fail and fallover to a healthy node much faster than attempting a request that may be heavier than a ping. + +Here's an example with a cluster with 2 nodes where the second node fails on ping + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(2) + .Ping(p => p.Succeeds(Always)) + .Ping(p => p.OnPort(9201).FailAlways()) + .StaticConnectionPool() + .AllDefaults() +); +---- + +When making the calls, the first call goes to 9200 which succeeds, +and the 2nd call does a ping on 9201 because it's used for the first time. 
+The ping fails so we wrap over to node 9200 which we've already pinged. + +Finally we assert that the connectionpool has one node that is marked as dead + +[source,csharp] +---- +await audit.TraceCalls( + + new ClientCall { + { PingSuccess, 9200}, + { HealthyResponse, 9200}, + { pool => + { + pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0); + } } + }, + new ClientCall { + { PingFailure, 9201}, + { HealthyResponse, 9200}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) } + } +); +---- + +A cluster with 4 nodes where the second and third pings fail + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(4) + .Ping(p => p.SucceedAlways()) + .Ping(p => p.OnPort(9201).FailAlways()) + .Ping(p => p.OnPort(9202).FailAlways()) + .StaticConnectionPool() + .AllDefaults() +); +---- + +The first call goes to 9200 which succeeds + +The 2nd call does a ping on 9201 because its used for the first time. +It fails and so we ping 9202 which also fails. We then ping 9203 becuase +we haven't used it before and it succeeds + +Finally we assert that the connectionpool has two nodes that are marked as dead + +[source,csharp] +---- +await audit.TraceCalls( +new ClientCall { + { PingSuccess, 9200}, + { HealthyResponse, 9200}, + { pool => + { + pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0); + } } + }, +new ClientCall { + { PingFailure, 9201}, + { PingFailure, 9202}, + { PingSuccess, 9203}, + { HealthyResponse, 9203}, +{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } + } +); +---- + +A healthy cluster of 4 (min master nodes of 3 of course!) 
+ +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(4) + .Ping(p => p.SucceedAlways()) + .StaticConnectionPool() + .AllDefaults() +); + +await audit.TraceCalls( + new ClientCall { { PingSuccess, 9200}, { HealthyResponse, 9200} }, + new ClientCall { { PingSuccess, 9201}, { HealthyResponse, 9201} }, + new ClientCall { { PingSuccess, 9202}, { HealthyResponse, 9202} }, + new ClientCall { { PingSuccess, 9203}, { HealthyResponse, 9203} }, + new ClientCall { { HealthyResponse, 9200} }, + new ClientCall { { HealthyResponse, 9201} }, + new ClientCall { { HealthyResponse, 9202} }, + new ClientCall { { HealthyResponse, 9203} }, + new ClientCall { { HealthyResponse, 9200} } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/pinging/revival.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/pinging/revival.asciidoc new file mode 100644 index 00000000000..3faf4c7dd70 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/pinging/revival.asciidoc @@ -0,0 +1,56 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[pinging-revival]] +== Pinging - Revival + +When a node is marked dead it will only be put in the dog house for a certain amount of time. Once it comes out of the dog house, or revived, we schedule a ping +before the actual call to make sure its up and running. If its still down we put it _back in the dog house_ a little longer. +Take a look at the <> for an explanation on these timeouts. 
+ +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(3) + .ClientCalls(r => r.SucceedAlways()) + .ClientCalls(r => r.OnPort(9202).Fails(Once)) + .Ping(p => p.SucceedAlways()) + .StaticConnectionPool() + .AllDefaults() +); +audit = await audit.TraceCalls( + new ClientCall { { PingSuccess, 9200 }, { HealthyResponse, 9200 } }, + new ClientCall { { PingSuccess, 9201 }, { HealthyResponse, 9201 } }, + new ClientCall { + { PingSuccess, 9202}, + { BadResponse, 9202}, + { HealthyResponse, 9200}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) } + }, + new ClientCall { { HealthyResponse, 9201 } }, + new ClientCall { { HealthyResponse, 9200 } }, + new ClientCall { { HealthyResponse, 9201 } }, + new ClientCall { + { HealthyResponse, 9200 }, + { pool => pool.Nodes.First(n=>!n.IsAlive).DeadUntil.Should().BeAfter(DateTime.UtcNow) } + } +); +audit = await audit.TraceCalls( + new ClientCall { { HealthyResponse, 9201 } }, + new ClientCall { { HealthyResponse, 9200 } }, + new ClientCall { { HealthyResponse, 9201 } } +); +audit.ChangeTime(d => d.AddMinutes(20)); +audit = await audit.TraceCalls( + new ClientCall { { HealthyResponse, 9201 } }, + new ClientCall { + { Resurrection, 9202 }, + { PingSuccess, 9202 }, + { HealthyResponse, 9202 } + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/request-overrides/disable-sniff-ping-per-request.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/disable-sniff-ping-per-request.asciidoc new file mode 100644 index 00000000000..6be5fab01dc --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/disable-sniff-ping-per-request.asciidoc @@ -0,0 +1,108 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[disabling-sniffing-and-pinging-on-a-request-basis]] +== Disabling sniffing and pinging 
on a request basis + +Even if you are using a sniffing connection pool thats set up to sniff on start/failure +and pinging enabled, you can opt out of this behaviour on a _per request_ basis. + +In our first test we set up a cluster that pings and sniffs on startup +but we disable the sniffing on our first request so we only see the ping and the response + +Let's set up the cluster and configure clients to **always** sniff on startup + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.SucceedAlways()) + .SniffingConnectionPool() + .Settings(s => s.SniffOnStartup()) <1> +); +---- +<1> sniff on startup + +Now We disable sniffing on the request so even though it's our first call, we do not want to sniff on startup + +Instead, the sniff on startup is deferred to the second call into the cluster that +does not disable sniffing on a per request basis + +And after that no sniff on startup will happen again + +[source,csharp] +---- +audit = await audit.TraceCalls( +new ClientCall(r => r.DisableSniffing()) <1> + { + { PingSuccess, 9200 }, <2> + { HealthyResponse, 9200 } + }, +new ClientCall() + { + { SniffOnStartup }, <3> + { SniffSuccess, 9200 }, + { PingSuccess, 9200 }, + { HealthyResponse, 9200 } + }, +new ClientCall() + { + { PingSuccess, 9201 }, + { HealthyResponse, 9201 } + } +); +---- +<1> disable sniffing + +<2> first call is a successful ping + +<3> sniff on startup call happens here, on the second call + +Now, let's disable pinging on the request + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.SucceedAlways()) + .SniffingConnectionPool() + .Settings(s => s.SniffOnStartup()) +); +audit = await audit.TraceCall( + new ClientCall(r => r.DisablePing()) <1> + { + { SniffOnStartup }, + { SniffSuccess, 9200 }, <2> + { HealthyResponse, 9200 } + } +); +---- +<1> disable ping + +<2> No ping after sniffing + +Finally, let's demonstrate disabling both sniff and 
ping on the request + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.SucceedAlways()) + .SniffingConnectionPool() + .Settings(s => s.SniffOnStartup()) +); + +audit = await audit.TraceCall( + new ClientCall(r=>r.DisableSniffing().DisablePing()) <1> + { + { HealthyResponse, 9200 } <2> + } +); +---- +<1> diable ping and sniff + +<2> no ping or sniff before the call + diff --git a/docs/asciidoc/client-concepts/connection-pooling/request-overrides/request-timeouts-overrides.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/request-timeouts-overrides.asciidoc new file mode 100644 index 00000000000..4cb2ff5b448 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/request-timeouts-overrides.asciidoc @@ -0,0 +1,97 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[request-timeouts]] +== Request Timeouts + +While you can specify Request time out globally you can override this per request too + +we set up a 10 node cluster with a global time out of 20 seconds. +Each call on a node takes 10 seconds. So we can only try this call on 2 nodes +before the max request time out kills the client call. + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(10))) + .ClientCalls(r => r.OnPort(9209).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(20))) +); +---- + +On the second request we specify a request timeout override to 80 seconds +We should now see more nodes being tried. 
+ +[source,csharp] +---- +audit = await audit.TraceCalls( + new ClientCall { + { BadResponse, 9200 }, + { BadResponse, 9201 }, + { MaxTimeoutReached } + }, +new ClientCall(r => r.RequestTimeout(TimeSpan.FromSeconds(80))) + { + { BadResponse, 9203 }, + { BadResponse, 9204 }, + { BadResponse, 9205 }, + { BadResponse, 9206 }, + { BadResponse, 9207 }, + { BadResponse, 9208 }, + { HealthyResponse, 9209 }, + } +); +---- + +[[connect-timeouts]] +== Connect Timeouts + +Connect timeouts can be overridden on a per request basis. Whilst the underlying `WebRequest` (in the case of full CLR) +and `HttpClient` (in the case of Core CLR) cannot distinguish between connect and retry timeouts, +we use a separate configuration value for ping requests. + +we set up a 10 node cluster with a global time out of 20 seconds. +Each call on a node takes 10 seconds. So we can only try this call on 2 nodes +before the max request time out kills the client call. + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Ping(p => p.SucceedAlways().Takes(TimeSpan.FromSeconds(20))) + .ClientCalls(r => r.SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.RequestTimeout(TimeSpan.FromSeconds(10)).PingTimeout(TimeSpan.FromSeconds(10))) +); +---- + +The first call uses the configured global settings, request times out after 10 seconds and ping +calls always take 20, so we should see a single ping failure + +On the second request we set a request ping timeout override of 2seconds +We should now see more nodes being tried before the request timeout is hit. 
+ +[source,csharp] +---- +audit = await audit.TraceCalls( +new ClientCall { + { PingFailure, 9200 }, + { MaxTimeoutReached } + }, +new ClientCall(r => r.PingTimeout(TimeSpan.FromSeconds(2))) + { + { PingFailure, 9202 }, + { PingFailure, 9203 }, + { PingFailure, 9204 }, + { PingFailure, 9205 }, + { PingFailure, 9206 }, + { MaxTimeoutReached } + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/request-overrides/respects-allowed-status-code.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/respects-allowed-status-code.asciidoc new file mode 100644 index 00000000000..c386afb595f --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/respects-allowed-status-code.asciidoc @@ -0,0 +1,27 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[allowed-status-codes]] +== Allowed status codes + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways(400)) + .StaticConnectionPool() + .Settings(s => s.DisablePing().MaximumRetries(0)) +); +audit = await audit.TraceCalls( + new ClientCall() { + { BadResponse, 9200 } + }, + new ClientCall(r => r.AllowedStatusCodes(400)) { + { HealthyResponse, 9201 } + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/request-overrides/respects-force-node.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/respects-force-node.asciidoc new file mode 100644 index 00000000000..99d12bdf3bb --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/respects-force-node.asciidoc @@ -0,0 +1,28 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[forcing-nodes]] +== Forcing 
nodes + +Sometimes you might want to fire a single request to a specific node. You can do so using the `ForceNode` +request configuration. This will ignore the pool and not retry. + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.SucceedAlways()) + .ClientCalls(r => r.OnPort(9208).FailAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing()) +); +audit = await audit.TraceCall( + new ClientCall(r => r.ForceNode(new Uri("http://localhost:9208"))) { + { BadResponse, 9208 } + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/request-overrides/respects-max-retry-overrides.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/respects-max-retry-overrides.asciidoc new file mode 100644 index 00000000000..fbee906c27a --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/request-overrides/respects-max-retry-overrides.asciidoc @@ -0,0 +1,76 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[maximum-retries]] +== Maximum Retries + +By default retry as many times as we have nodes. However retries still respect the request timeout. +Meaning if you have a 100 node cluster and a request timeout of 20 seconds we will retry as many times as we can +but give up after 20 seconds + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways()) + .ClientCalls(r => r.OnPort(9209).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing()) +); +audit = await audit.TraceCall( + new ClientCall(r => r.MaxRetries(2)) { + { BadResponse, 9200 }, + { BadResponse, 9201 }, + { BadResponse, 9202 }, + { MaxRetriesReached } + } +); +---- + +When you have a 100 node cluster you might want to ensure a fixed number of retries. 
+Remember that the actual number of requests is initial attempt + set number of retries + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways()) + .ClientCalls(r => r.OnPort(9209).SucceedAlways()) + .StaticConnectionPool() + .Settings(s => s.DisablePing().MaximumRetries(5)) +); + +audit = await audit.TraceCall( + new ClientCall(r => r.MaxRetries(2)) { + { BadResponse, 9200 }, + { BadResponse, 9201 }, + { BadResponse, 9202 }, + { MaxRetriesReached } + } +); +---- + +This makes setting any retry setting on a single node connection pool a NOOP, this is by design! +Connection pooling and connection failover is about trying to fail sanely whilst still utilizing available resources and +not giving up on the fail fast principle. It's *NOT* a mechanism for forcing requests to succeed. + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3))) + .ClientCalls(r => r.OnPort(9209).SucceedAlways()) + .SingleNodeConnection() + .Settings(s => s.DisablePing().MaximumRetries(10)) +); + +audit = await audit.TraceCall( + new ClientCall(r => r.MaxRetries(10)) { + { BadResponse, 9200 } + } +); +---- + diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/RoundRobin/RoundRobin.doc.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/round-robin/round-robin.asciidoc similarity index 63% rename from docs/asciidoc/ClientConcepts/ConnectionPooling/RoundRobin/RoundRobin.doc.asciidoc rename to docs/asciidoc/client-concepts/connection-pooling/round-robin/round-robin.asciidoc index 04cbc4ae7a0..ba94b76b7d4 100644 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/RoundRobin/RoundRobin.doc.asciidoc +++ b/docs/asciidoc/client-concepts/connection-pooling/round-robin/round-robin.asciidoc @@ -1,14 +1,22 @@ -Round Robin -Each connection pool round robins over the `live` nodes, to evenly distribute the load over all known nodes. 
+:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current +:github: https://github.com/elastic/elasticsearch-net -== GetNext -GetNext is implemented in a lock free thread safe fashion, meaning each callee gets returned its own cursor to advance +:nuget: https://www.nuget.org/packages + +[[round-robin]] +== Round Robin + +<> and <> connection pools +round robin over the `live` nodes to evenly distribute request load over all known nodes. + +=== GetNext + +`GetNext` is implemented in a lock free thread safe fashion, meaning each callee gets returned its own cursor to advance over the internal list of nodes. This to guarantee each request that needs to fall over tries all the nodes without suffering from noisy neighboors advancing a global cursor. - -[source, csharp] +[source,csharp] ---- var uris = Enumerable.Range(9200, NumberOfNodes).Select(p => new Uri("http://localhost:" + p)); var staticPool = new StaticConnectionPool(uris, randomize: false); @@ -16,56 +24,63 @@ var sniffingPool = new SniffingConnectionPool(uris, randomize: false); this.AssertCreateView(staticPool); this.AssertCreateView(sniffingPool); ---- -Here we have setup a static connection pool seeded with 10 nodes. We force randomizationOnStartup to false -so that we can test the nodes being returned are int the order we expect them to. + +Here we have setup a static connection pool seeded with 10 nodes. We force randomization OnStartup to false +so that we can test the nodes being returned are int the order we expect them to. So what order we expect? Imagine the following: + Thread A calls GetNext first without a local cursor and takes the current from the internal global cursor which is 0. Thread B calls GetNext() second without a local cursor and therefor starts at 1. After this each thread should walk the nodes in successive order using their local cursor e.g Thread A might get 0,1,2,3,5 and thread B will get 1,2,3,4,0. 
-[source, csharp] +[source,csharp] ---- var startingPositions = Enumerable.Range(0, NumberOfNodes) - .Select(i => pool.CreateView().First()) - .Select(n => n.Uri.Port) - .ToList(); ----- -[source, csharp] ----- + .Select(i => pool.CreateView().First()) + .Select(n => n.Uri.Port) + .ToList(); + var expectedOrder = Enumerable.Range(9200, NumberOfNodes); + startingPositions.Should().ContainInOrder(expectedOrder); ---- What the above code just proved is that each call to GetNext(null) gets assigned the next available node. Lets up the ante: -- call get next over `NumberOfNodes * 2` threads -- on each thread call getnext `NumberOfNodes * 10` times using a local cursor. -We'll validate that each thread sees all the nodes and they they wrap over e.g after node 9209 + +* call get next over `NumberOfNodes * 2` threads + +* on each thread call getnext `NumberOfNodes * 10` times using a local cursor. +We'll validate that each thread sees all the nodes and they they wrap over e.g after node 9209 comes 9200 again -[source, csharp] +[source,csharp] ---- var threadedStartPositions = new ConcurrentBag(); + +var threads = Enumerable.Range(0, 20) + .Select(i => CreateThreadCallingGetNext(pool, threadedStartPositions)) + .ToList(); ---- -[source, csharp] + +[source,csharp] ---- -var threads = Enumerable.Range(0, 20) - .Select(i => CreateThreadCallingGetNext(pool, threadedStartPositions)) - .ToList(); -t.Start(); -t.Join(); +foreach (var t in threads) t.Start(); + +foreach (var t in threads) t.Join(); ---- + Each thread reported the first node it started off lets make sure we see each node twice as the first node because we started `NumberOfNodes * 2` threads -[source, csharp] +[source,csharp] ---- var grouped = threadedStartPositions.GroupBy(p => p).ToList(); ----- -[source, csharp] ----- + grouped.Count().Should().Be(NumberOfNodes); + grouped.Select(p => p.Count()).Should().OnlyContain(p => p == 2); ---- + diff --git 
a/docs/asciidoc/ClientConcepts/ConnectionPooling/RoundRobin/SkipDeadNodes.doc.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/round-robin/skip-dead-nodes.asciidoc similarity index 51% rename from docs/asciidoc/ClientConcepts/ConnectionPooling/RoundRobin/SkipDeadNodes.doc.asciidoc rename to docs/asciidoc/client-concepts/connection-pooling/round-robin/skip-dead-nodes.asciidoc index 54688bd9f36..47b980a048a 100644 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/RoundRobin/SkipDeadNodes.doc.asciidoc +++ b/docs/asciidoc/client-concepts/connection-pooling/round-robin/skip-dead-nodes.asciidoc @@ -1,196 +1,220 @@ -Round Robin - Skipping Dead Nodes +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[round-robin-skipping-dead-nodes]] +== Round Robin - Skipping Dead Nodes + When selecting nodes the connection pool will try and skip all the nodes that are marked dead. +=== GetNext -== GetNext GetNext is implemented in a lock free thread safe fashion, meaning each callee gets returned its own cursor to advance over the internal list of nodes. This to guarantee each request that needs to fall over tries all the nodes without suffering from noisy neighboors advancing a global cursor. 
- -[source, csharp] +[source,csharp] ---- var seeds = Enumerable.Range(9200, NumberOfNodes).Select(p => new Node(new Uri("http://localhost:" + p))).ToList(); + +seeds.First().MarkDead(DateTime.Now.AddDays(1)); + var pool = new StaticConnectionPool(seeds, randomize: false); + var node = pool.CreateView().First(); -node.Uri.Port.Should().Be(9200); -node = pool.CreateView().First(); + node.Uri.Port.Should().Be(9201); + node = pool.CreateView().First(); + node.Uri.Port.Should().Be(9202); +---- + +[source,csharp] +---- var seeds = Enumerable.Range(9200, NumberOfNodes).Select(p => new Node(new Uri("http://localhost:" + p))).ToList(); -seeds.First().MarkDead(DateTime.Now.AddDays(1)); var pool = new StaticConnectionPool(seeds, randomize: false); var node = pool.CreateView().First(); +node.Uri.Port.Should().Be(9200); +node = pool.CreateView().First(); node.Uri.Port.Should().Be(9201); node = pool.CreateView().First(); node.Uri.Port.Should().Be(9202); ---- -After we marke the first node alive again we expect it to be hit again -[source, csharp] +After we marked the first node alive again, we expect it to be hit again + +[source,csharp] ---- seeds.First().MarkAlive(); ----- -[source, csharp] ----- + var node = pool.CreateView().First(); + node.Uri.Port.Should().Be(9201); + node = pool.CreateView().First(); + node.Uri.Port.Should().Be(9202); + node = pool.CreateView().First(); + node.Uri.Port.Should().Be(9200); +---- + +[source,csharp] +---- var dateTimeProvider = new TestableDateTimeProvider(); + var seeds = Enumerable.Range(9200, NumberOfNodes).Select(p => new Node(new Uri("http://localhost:" + p))).ToList(); + seeds.First().MarkDead(dateTimeProvider.Now().AddDays(1)); + var pool = new StaticConnectionPool(seeds, randomize: false, dateTimeProvider: dateTimeProvider); + var node = pool.CreateView().First(); + node.Uri.Port.Should().Be(9201); + node = pool.CreateView().First(); + node.Uri.Port.Should().Be(9202); ---- -If we forward our clock 2 days the node that was marked dead 
until tomorrow (or yesterday!) should be resurrected -[source, csharp] +If we roll the clock forward two days, the node that was marked dead until tomorrow (or yesterday!) should be resurrected + +[source,csharp] ---- dateTimeProvider.ChangeTime(d => d.AddDays(2)); ----- -[source, csharp] ----- + var n = pool.CreateView().First(); + n.Uri.Port.Should().Be(9201); + n = pool.CreateView().First(); + n.Uri.Port.Should().Be(9202); + n = pool.CreateView().First(); + n.Uri.Port.Should().Be(9200); + n.IsResurrected.Should().BeTrue(); ---- + A cluster with 2 nodes where the second node fails on ping -[source, csharp] +[source,csharp] ---- var audit = new Auditor(() => Framework.Cluster - .Nodes(4) - .ClientCalls(p => p.Succeeds(Always)) - .ClientCalls(p => p.OnPort(9201).FailAlways()) - .ClientCalls(p => p.OnPort(9203).FailAlways()) - .StaticConnectionPool() - .Settings(p=>p.DisablePing()) + .Nodes(4) + .ClientCalls(p => p.Succeeds(Always)) + .ClientCalls(p => p.OnPort(9201).FailAlways()) + .ClientCalls(p => p.OnPort(9203).FailAlways()) + .StaticConnectionPool() + .Settings(p=>p.DisablePing()) ); ---- -[source, csharp] ----- -await audit.TraceCalls( ----- + The first call goes to 9200 which succeeds -[source, csharp] ----- -new ClientCall { - { HealthyResponse, 9200}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0) } - }, ----- -The 2nd call does a ping on 9201 because its used for the first time. +The 2nd call does a ping on 9201 because its used for the first time. 
It fails so we wrap over to node 9202 -[source, csharp] ----- -new ClientCall { - { BadResponse, 9201}, - { HealthyResponse, 9202}, ----- Finally we assert that the connectionpool has one node that is marked as dead -[source, csharp] ----- -{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) } - }, ----- The next call goes to 9203 which fails so we should wrap over -[source, csharp] ----- -new ClientCall { - { BadResponse, 9203}, - { HealthyResponse, 9200}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } - }, - new ClientCall { - { HealthyResponse, 9202}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } - }, - new ClientCall { - { HealthyResponse, 9200}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } - }, - new ClientCall { - { HealthyResponse, 9202}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } - }, - new ClientCall { - { HealthyResponse, 9200}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } - } +[source,csharp] +---- +await audit.TraceCalls( +new ClientCall { + { HealthyResponse, 9200}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0) } + }, +new ClientCall { + { BadResponse, 9201}, + { HealthyResponse, 9202}, +{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) } + }, +new ClientCall { + { BadResponse, 9203}, + { HealthyResponse, 9200}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } + }, + new ClientCall { + { HealthyResponse, 9202}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } + }, + new ClientCall { + { HealthyResponse, 9200}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } + }, + new ClientCall { + { HealthyResponse, 9202}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } + }, + new ClientCall { + { HealthyResponse, 9200}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } + } ); ---- + A cluster with 2 nodes where the 
second node fails on ping -[source, csharp] +[source,csharp] ---- var audit = new Auditor(() => Framework.Cluster - .Nodes(4) - .ClientCalls(p => p.Fails(Always)) - .StaticConnectionPool() - .Settings(p=>p.DisablePing()) + .Nodes(4) + .ClientCalls(p => p.Fails(Always)) + .StaticConnectionPool() + .Settings(p=>p.DisablePing()) ); ---- -[source, csharp] ----- -await audit.TraceCalls( ----- + All the calls fail -[source, csharp] ----- -new ClientCall { - { BadResponse, 9200}, - { BadResponse, 9201}, - { BadResponse, 9202}, - { BadResponse, 9203}, - { MaxRetriesReached }, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } - }, ----- After all our registered nodes are marked dead we want to sample a single dead node each time to quickly see if the cluster is back up. We do not want to retry all 4 nodes -[source, csharp] ----- -new ClientCall { - { AllNodesDead }, - { Resurrection, 9201}, - { BadResponse, 9201}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } - }, - new ClientCall { - { AllNodesDead }, - { Resurrection, 9202}, - { BadResponse, 9202}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } - }, - new ClientCall { - { AllNodesDead }, - { Resurrection, 9203}, - { BadResponse, 9203}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } - }, - new ClientCall { - { AllNodesDead }, - { Resurrection, 9200}, - { BadResponse, 9200}, - { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } - } +[source,csharp] +---- +await audit.TraceCalls( +new ClientCall { + { BadResponse, 9200}, + { BadResponse, 9201}, + { BadResponse, 9202}, + { BadResponse, 9203}, + { MaxRetriesReached }, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + }, +new ClientCall { + { AllNodesDead }, + { Resurrection, 9201}, + { BadResponse, 9201}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + }, + new ClientCall { + { AllNodesDead }, + { Resurrection, 9202}, + { BadResponse, 9202}, 
+ { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + }, + new ClientCall { + { AllNodesDead }, + { Resurrection, 9203}, + { BadResponse, 9203}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + }, + new ClientCall { + { AllNodesDead }, + { Resurrection, 9200}, + { BadResponse, 9200}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + } ); ---- + diff --git a/docs/asciidoc/ClientConcepts/ConnectionPooling/RoundRobin/VolatileUpdates.doc.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/round-robin/volatile-updates.asciidoc similarity index 68% rename from docs/asciidoc/ClientConcepts/ConnectionPooling/RoundRobin/VolatileUpdates.doc.asciidoc rename to docs/asciidoc/client-concepts/connection-pooling/round-robin/volatile-updates.asciidoc index 33f1561476d..6a8fcb40d48 100644 --- a/docs/asciidoc/ClientConcepts/ConnectionPooling/RoundRobin/VolatileUpdates.doc.asciidoc +++ b/docs/asciidoc/client-concepts/connection-pooling/round-robin/volatile-updates.asciidoc @@ -1,28 +1,28 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current +:github: https://github.com/elastic/elasticsearch-net +:nuget: https://www.nuget.org/packages +[[volatile-updates]] +== Volatile Updates -[source, csharp] +[source,csharp] ---- var uris = Enumerable.Range(9200, NumberOfNodes).Select(p => new Uri("http://localhost:" + p)); -var sniffingPool = new SniffingConnectionPool(uris, randomize: false); -Action callSniffing = () => this.AssertCreateView(sniffingPool); -callSniffing.ShouldNotThrow(); -var uris = Enumerable.Range(9200, NumberOfNodes).Select(p => new Uri("http://localhost:" + p)); + var staticPool = new StaticConnectionPool(uris, randomize: false); + Action callStatic = () => this.AssertCreateView(staticPool); + callStatic.ShouldNotThrow(); ---- - -[source, csharp] +[source,csharp] ---- -var threads = Enumerable.Range(0, 50) - .Select(i => CreateReadAndUpdateThread(pool)) - .ToList(); ----- -[source, 
csharp] ----- -t.Start(); -t.Join(); +var uris = Enumerable.Range(9200, NumberOfNodes).Select(p => new Uri("http://localhost:" + p)); +var sniffingPool = new SniffingConnectionPool(uris, randomize: false); +Action callSniffing = () => this.AssertCreateView(sniffingPool); +callSniffing.ShouldNotThrow(); ---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/sniffing/on-connection-failure.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/sniffing/on-connection-failure.asciidoc new file mode 100644 index 00000000000..b360772e70e --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/sniffing/on-connection-failure.asciidoc @@ -0,0 +1,154 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[sniffing-on-connection-failure]] +== Sniffing on connection failure + +Sniffing on connection is enabled by default when using a connection pool that allows reseeding. +The only IConnectionPool we ship that allows this is the <>. + +This can be very handy to force a refresh of the pools known healthy node by inspecting Elasticsearch itself. +A sniff tries to get the nodes by asking each currently known node until one response. + +Here we seed our connection with 5 known nodes 9200-9204 of which we think +9202, 9203, 9204 are master eligible nodes. Our virtualized cluster will throw once when doing +a search on 9201. This should a sniff to be kicked off. 
+ +When the call fails on 9201 the sniff succeeds and returns a new cluster of healthy nodes +this cluster only has 3 nodes and the known masters are 9200 and 9202 but a search on 9201 +still fails once + +After this second failure on 9201 another sniff will return a cluster that no +longer fails but looks completely different (9210-9212) we should be able to handle this + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(5) + .MasterEligible(9202, 9203, 9204) + .ClientCalls(r => r.SucceedAlways()) + .ClientCalls(r => r.OnPort(9201).Fails(Once)) +.Sniff(p => p.SucceedAlways(Framework.Cluster + .Nodes(3) + .MasterEligible(9200, 9202) + .ClientCalls(r => r.OnPort(9201).Fails(Once)) +.Sniff(s => s.SucceedAlways(Framework.Cluster + .Nodes(3, 9210) + .MasterEligible(9210, 9212) + .ClientCalls(r => r.SucceedAlways()) + .Sniff(r => r.SucceedAlways()) + )) + )) + .SniffingConnectionPool() + .Settings(s => s.DisablePing().SniffOnStartup(false)) +); +---- + +We assert we do a sniff on our first known master node 9202 + +Our pool should now have three nodes + +We assert we do a sniff on the first master node in our updated cluster + +[source,csharp] +---- +audit = await audit.TraceCalls( +new ClientCall { + { HealthyResponse, 9200 }, + { pool => pool.Nodes.Count.Should().Be(5) } + }, + new ClientCall { + { BadResponse, 9201}, +{ SniffOnFail }, + { SniffSuccess, 9202}, + { HealthyResponse, 9200}, +{ pool => pool.Nodes.Count.Should().Be(3) } + }, + new ClientCall { + { BadResponse, 9201}, +{ SniffOnFail }, + { SniffSuccess, 9200}, + { HealthyResponse, 9210}, + { pool => pool.Nodes.Count.Should().Be(3) } + }, + new ClientCall { { HealthyResponse, 9211 } }, + new ClientCall { { HealthyResponse, 9212 } }, + new ClientCall { { HealthyResponse, 9210 } }, + new ClientCall { { HealthyResponse, 9211 } }, + new ClientCall { { HealthyResponse, 9212 } }, + new ClientCall { { HealthyResponse, 9210 } }, + new ClientCall { { HealthyResponse, 9211 } }, + new 
ClientCall { { HealthyResponse, 9212 } }, + new ClientCall { { HealthyResponse, 9210 } } +); +---- + +Here we set up our cluster exactly the same as the previous setup +Only we enable pinging (default is true) and make the ping fail + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(5) + .MasterEligible(9202, 9203, 9204) + .Ping(r => r.OnPort(9201).Fails(Once)) + .Sniff(p => p.SucceedAlways(Framework.Cluster + .Nodes(3) + .MasterEligible(9200, 9202) + .Ping(r => r.OnPort(9201).Fails(Once)) + .Sniff(s => s.SucceedAlways(Framework.Cluster + .Nodes(3, 9210) + .MasterEligible(9210, 9211) + .Ping(r => r.SucceedAlways()) + .Sniff(r => r.SucceedAlways()) + )) + )) + .SniffingConnectionPool() + .Settings(s => s.SniffOnStartup(false)) +); +---- + +We assert we do a sniff on our first known master node 9202 + +Our pool should now have three nodes + +We assert we do a sniff on the first master node in our updated cluster + +9210 was already pinged after the sniff returned the new nodes + +[source,csharp] +---- +audit = await audit.TraceCalls( + new ClientCall { + { PingSuccess, 9200 }, + { HealthyResponse, 9200 }, + { pool => pool.Nodes.Count.Should().Be(5) } + }, + new ClientCall { + { PingFailure, 9201}, +{ SniffOnFail }, + { SniffSuccess, 9202}, + { PingSuccess, 9200}, + { HealthyResponse, 9200}, +{ pool => pool.Nodes.Count.Should().Be(3) } + }, + new ClientCall { + { PingFailure, 9201}, +{ SniffOnFail }, + { SniffSuccess, 9200}, + { PingSuccess, 9210}, + { HealthyResponse, 9210}, + { pool => pool.Nodes.Count.Should().Be(3) } + }, + new ClientCall { { PingSuccess, 9211 }, { HealthyResponse, 9211 } }, + new ClientCall { { PingSuccess, 9212 }, { HealthyResponse, 9212 } }, +new ClientCall { { HealthyResponse, 9210 } }, + new ClientCall { { HealthyResponse, 9211 } }, + new ClientCall { { HealthyResponse, 9212 } }, + new ClientCall { { HealthyResponse, 9210 } } +); +---- + diff --git 
a/docs/asciidoc/client-concepts/connection-pooling/sniffing/on-stale-cluster-state.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/sniffing/on-stale-cluster-state.asciidoc new file mode 100644 index 00000000000..b3003cdf3d5 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/sniffing/on-stale-cluster-state.asciidoc @@ -0,0 +1,100 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[sniffing-periodically]] +== Sniffing periodically + +Connection pools that return true for `SupportsReseeding` can be configured to sniff periodically. +In addition to sniffing on startup and sniffing on failures, sniffing periodically can benefit scenarios where +clusters are often scaled horizontally during peak hours. An application might have a healthy view of a subset of the nodes +but without sniffing periodically it will never find the nodes that have been added to help out with load + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .MasterEligible(9202, 9203, 9204) + .ClientCalls(r => r.SucceedAlways()) + .Sniff(s => s.SucceedAlways(Framework.Cluster + .Nodes(100) + .MasterEligible(9202, 9203, 9204) + .ClientCalls(r => r.SucceedAlways()) + .Sniff(ss => ss.SucceedAlways(Framework.Cluster + .Nodes(10) + .MasterEligible(9202, 9203, 9204) + .ClientCalls(r => r.SucceedAlways()) + )) + )) + .SniffingConnectionPool() + .Settings(s => s + .DisablePing() + .SniffOnConnectionFault(false) + .SniffOnStartup(false) + .SniffLifeSpan(TimeSpan.FromMinutes(30)) + ) +); +---- + +healthy cluster all nodes return healthy responses + +[source,csharp] +---- +audit = await audit.TraceCalls( + new ClientCall { { HealthyResponse, 9200 } }, + new ClientCall { { HealthyResponse, 9201 } }, + new ClientCall { { HealthyResponse, 9202 } }, + new ClientCall { { HealthyResponse, 9203 } }, + new ClientCall { { 
HealthyResponse, 9204 } }, + new ClientCall { { HealthyResponse, 9205 } }, + new ClientCall { { HealthyResponse, 9206 } }, + new ClientCall { { HealthyResponse, 9207 } }, + new ClientCall { { HealthyResponse, 9208 } }, + new ClientCall { { HealthyResponse, 9209 } }, + new ClientCall { + { HealthyResponse, 9200 }, + { pool => pool.Nodes.Count.Should().Be(10) } + } +); +---- + +Now let's forward the clock 31 minutes, our sniff lifespan should now go state +and the first call should do a sniff which discovered we scaled up to a 100 nodes! + +[source,csharp] +---- +audit.ChangeTime(d => d.AddMinutes(31)); +---- + +a sniff is done first and it prefers the first node master node + +[source,csharp] +---- +audit = await audit.TraceCalls( + new ClientCall { +{ SniffOnStaleCluster }, + { SniffSuccess, 9202 }, + { HealthyResponse, 9201 }, + { pool => pool.Nodes.Count.Should().Be(100) } + } +); + +audit.ChangeTime(d => d.AddMinutes(31)); +---- + +a sniff is done first and it prefers the first node master node + +[source,csharp] +---- +audit = await audit.TraceCalls( + new ClientCall { +{ SniffOnStaleCluster }, + { SniffSuccess, 9202 }, + { HealthyResponse, 9200 }, + { pool => pool.Nodes.Count.Should().Be(10) } + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/sniffing/on-startup.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/sniffing/on-startup.asciidoc new file mode 100644 index 00000000000..42704963b28 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/sniffing/on-startup.asciidoc @@ -0,0 +1,162 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[sniffing-on-startup]] +== Sniffing on startup + +<> that return true for `SupportsReseeding` will sniff on startup by default. + +We can demonstrate this by creating a _virtual_ Elasticsearch cluster with NEST's Test Framework. 
+Here we create a 10 node cluster that uses a <>, setting +sniff to fail on all nodes *_except_* 9202 + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Sniff(s => s.Fails(Always)) + .Sniff(s => s.OnPort(9202).Succeeds(Always)) + .SniffingConnectionPool() + .AllDefaults() +); +---- + +When the client call is made, we can see from the audit trail that the pool first tried to sniff on startup, +with a sniff failure on 9200 and 9201, followed by a sniff success on 9202. A ping and healthy response are made on +9200 + +[source,csharp] +---- +await audit.TraceCall(new ClientCall + { + { SniffOnStartup}, + { SniffFailure, 9200}, + { SniffFailure, 9201}, + { SniffSuccess, 9202}, + { PingSuccess , 9200}, + { HealthyResponse, 9200} +}); +---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Sniff(s => s.Fails(Always)) + .Sniff(s => s.OnPort(9202).Succeeds(Always, Framework.Cluster.Nodes(8, startFrom: 9204))) + .SniffingConnectionPool() + .AllDefaults() +); + +await audit.TraceCall(new ClientCall { +{ SniffOnStartup}, +{ SniffFailure, 9200}, +{ SniffFailure, 9201}, +{ SniffSuccess, 9202}, +{ PingSuccess, 9204}, +{ HealthyResponse, 9204} + }); +---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Sniff(s => s.Fails(Always)) + .Sniff(s => s.OnPort(9209).Succeeds(Always)) + .SniffingConnectionPool() + .AllDefaults() +); + +await audit.TraceCall(new ClientCall { +{ SniffOnStartup}, +{ SniffFailure, 9200}, +{ SniffFailure, 9201}, +{ SniffFailure, 9202}, +{ SniffFailure, 9203}, +{ SniffFailure, 9204}, +{ SniffFailure, 9205}, +{ SniffFailure, 9206}, +{ SniffFailure, 9207}, +{ SniffFailure, 9208}, +{ SniffSuccess, 9209}, +{ PingSuccess, 9200}, +{ HealthyResponse, 9200} + }); +---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(new[] { + new Node(new Uri("http://localhost:9200")) { MasterEligible = false }, + new Node(new 
Uri("http://localhost:9201")) { MasterEligible = false }, + new Node(new Uri("http://localhost:9202")) { MasterEligible = true }, + }) + .Sniff(s => s.Succeeds(Always)) + .SniffingConnectionPool() + .AllDefaults() +); + +await audit.TraceCall(new ClientCall { +{ SniffOnStartup}, +{ SniffSuccess, 9202}, +{ PingSuccess, 9200}, +{ HealthyResponse, 9200} + }); +---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(new[] { + new Node(new Uri("http://localhost:9200")) { MasterEligible = true }, + new Node(new Uri("http://localhost:9201")) { MasterEligible = true }, + new Node(new Uri("http://localhost:9202")) { MasterEligible = false }, + }) + .Sniff(s => s.Fails(Always)) + .Sniff(s => s.OnPort(9202).Succeeds(Always)) + .SniffingConnectionPool() + .AllDefaults() +); + +await audit.TraceCall(new ClientCall { +{ SniffOnStartup}, +{ SniffFailure, 9200}, +{ SniffFailure, 9201}, +{ SniffSuccess, 9202}, +{ PingSuccess, 9200}, +{ HealthyResponse, 9200} + }); +---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Sniff(s => s.Fails(Always)) + .Sniff(s => s.OnPort(9202).Succeeds(Always)) + .SniffingConnectionPool() + .AllDefaults() +); +await audit.TraceCalls( + new ClientCall + { + { SniffOnStartup}, + { SniffFailure, 9200}, + { SniffFailure, 9201}, + { SniffSuccess, 9202}, + { PingSuccess , 9200}, + { HealthyResponse, 9200} + }, + new ClientCall + { + { PingSuccess, 9201}, + { HealthyResponse, 9201} + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/sniffing/role-detection.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/sniffing/role-detection.asciidoc new file mode 100644 index 00000000000..69e045e1509 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/sniffing/role-detection.asciidoc @@ -0,0 +1,148 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: 
https://www.nuget.org/packages + +[[sniffing-role-detection]] +== Sniffing role detection + +When we sniff the cluster state, we detect the role of the node, whether it's master eligible and holds data. +We use this information when selecting a node to perform an API call on. + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Sniff(s => s.Fails(Always)) + .Sniff(s => s.OnPort(9202) + .Succeeds(Always, Framework.Cluster.Nodes(8).StoresNoData(9200, 9201, 9202)) + ) + .SniffingConnectionPool() + .AllDefaults() +) +{ + AssertPoolBeforeCall = (pool) => + { + pool.Should().NotBeNull(); + pool.Nodes.Should().HaveCount(10); + pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10); + }, + + AssertPoolAfterCall = (pool) => + { + pool.Should().NotBeNull(); + pool.Nodes.Should().HaveCount(8); + pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5); + } +}; + +await audit.TraceStartup(); +---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Sniff(s => s.SucceedAlways() + .Succeeds(Always, Framework.Cluster.Nodes(8).StoresNoData(9200, 9201, 9202).SniffShouldReturnFqdn()) + ) + .SniffingConnectionPool() + .AllDefaults() +) +{ + AssertPoolBeforeCall = (pool) => + { + pool.Should().NotBeNull(); + pool.Nodes.Should().HaveCount(10); + pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10); + pool.Nodes.Should().OnlyContain(n => n.Uri.Host == "localhost"); + }, + + AssertPoolAfterCall = (pool) => + { + pool.Should().NotBeNull(); + pool.Nodes.Should().HaveCount(8); + pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5); + pool.Nodes.Should().OnlyContain(n => n.Uri.Host.StartsWith("fqdn") && !n.Uri.Host.Contains("/")); + } +}; + +await audit.TraceStartup(); +---- + +[source,csharp] +---- +var node = SniffAndReturnNode(); + +node.MasterEligible.Should().BeTrue(); + +node.HoldsData.Should().BeFalse(); + +node = await SniffAndReturnNodeAsync(); + +node.MasterEligible.Should().BeTrue(); + 
+node.HoldsData.Should().BeFalse(); +---- + +[source,csharp] +---- +var pipeline = CreatePipeline(); + +pipeline.Sniff(); +---- + +[source,csharp] +---- +var pipeline = CreatePipeline(); + +await pipeline.SniffAsync(); +---- + +[source,csharp] +---- +this._settings = + this._cluster.Client(u => new SniffingConnectionPool(new[] {u}), c => c.PrettyJson()).ConnectionSettings; + +var pipeline = new RequestPipeline(this._settings, DateTimeProvider.Default, new MemoryStreamFactory(), + new SearchRequestParameters()); +---- + +[source,csharp] +---- +var nodes = this._settings.ConnectionPool.Nodes; + +nodes.Should().NotBeEmpty().And.HaveCount(1); + +var node = nodes.First(); +---- + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(10) + .Sniff(s => s.Fails(Always)) + .Sniff(s => s.OnPort(9202) + .Succeeds(Always, Framework.Cluster.Nodes(8).MasterEligible(9200, 9201, 9202)) + ) + .SniffingConnectionPool() + .AllDefaults() +) +{ + AssertPoolBeforeCall = (pool) => + { + pool.Should().NotBeNull(); + pool.Nodes.Should().HaveCount(10); + pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(10); + }, + AssertPoolAfterCall = (pool) => + { + pool.Should().NotBeNull(); + pool.Nodes.Should().HaveCount(8); + pool.Nodes.Where(n => n.MasterEligible).Should().HaveCount(3); + } +}; +await audit.TraceStartup(); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/sticky/skip-dead-nodes.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/sticky/skip-dead-nodes.asciidoc new file mode 100644 index 00000000000..87de87660d1 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/sticky/skip-dead-nodes.asciidoc @@ -0,0 +1,197 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[skip-dead-nodes]] +== Skip Dead Nodes + +Sticky - Skipping Dead Nodes +When selecting nodes the connection pool 
will try and skip all the nodes that are marked dead. + +[source,csharp] +---- +var seeds = Enumerable.Range(9200, NumberOfNodes).Select(p => new Node(new Uri("http://localhost:" + p))).ToList(); + +seeds.First().MarkDead(DateTime.Now.AddDays(1)); + +var pool = new StickyConnectionPool(seeds); + +var node = pool.CreateView().First(); + +node.Uri.Port.Should().Be(9201); + +node = pool.CreateView().First(); + +node.Uri.Port.Should().Be(9201); +---- + +[source,csharp] +---- +var seeds = Enumerable.Range(9200, NumberOfNodes).Select(p => new Node(new Uri("http://localhost:" + p))).ToList(); +var pool = new StickyConnectionPool(seeds); +var node = pool.CreateView().First(); +node.Uri.Port.Should().Be(9200); +node = pool.CreateView().First(); +node.Uri.Port.Should().Be(9200); +node = pool.CreateView().First(); +node.Uri.Port.Should().Be(9200); +---- + +After we mark the first node alive again we expect it to be hit again + +[source,csharp] +---- +seeds.First().MarkAlive(); + +var node = pool.CreateView().First(); + +node.Uri.Port.Should().Be(9200); + +node = pool.CreateView().First(); + +node.Uri.Port.Should().Be(9200); + +node = pool.CreateView().First(); + +node.Uri.Port.Should().Be(9200); +---- + +[source,csharp] +---- +var dateTimeProvider = new TestableDateTimeProvider(); + +var seeds = Enumerable.Range(9200, NumberOfNodes).Select(p => new Node(new Uri("http://localhost:" + p))).ToList(); + +seeds.First().MarkDead(dateTimeProvider.Now().AddDays(1)); + +var pool = new StickyConnectionPool(seeds, dateTimeProvider: dateTimeProvider); + +var node = pool.CreateView().First(); + +node.Uri.Port.Should().Be(9201); + +node = pool.CreateView().First(); + +node.Uri.Port.Should().Be(9201); +---- + +If we forward our clock 2 days the node that was marked dead until tomorrow (or yesterday!)
should be resurrected + +[source,csharp] +---- +dateTimeProvider.ChangeTime(d => d.AddDays(2)); + +var n = pool.CreateView().First(); + +n.Uri.Port.Should().Be(9200); + +n = pool.CreateView().First(); + +n.Uri.Port.Should().Be(9200); + +n = pool.CreateView().First(); + +n.Uri.Port.Should().Be(9200); + +n.IsResurrected.Should().BeTrue(); +---- + +A cluster with 2 nodes where the second node fails on ping + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(4) + .ClientCalls(p => p.Succeeds(Always)) + .ClientCalls(p => p.OnPort(9200).FailAlways()) + .ClientCalls(p => p.OnPort(9201).FailAlways()) + .StickyConnectionPool() + .Settings(p => p.DisablePing()) +); +---- + +The first call goes to 9200 which succeeds + +The 2nd call does a ping on 9201 because its used for the first time. +It fails so we wrap over to node 9202 + +Finally we assert that the connectionpool has one node that is marked as dead + +[source,csharp] +---- +await audit.TraceCalls( +new ClientCall { + { BadResponse, 9200}, + { BadResponse, 9201}, + { HealthyResponse, 9202}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } + }, +new ClientCall { + { HealthyResponse, 9202}, +{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } + }, + new ClientCall { + { HealthyResponse, 9202}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) } + } +); +---- + +A cluster with 2 nodes where the second node fails on ping + +[source,csharp] +---- +var audit = new Auditor(() => Framework.Cluster + .Nodes(4) + .ClientCalls(p => p.Fails(Always)) + .StickyConnectionPool() + .Settings(p => p.DisablePing()) +); +---- + +All the calls fail + +After all our registered nodes are marked dead we want to sample a single dead node +each time to quickly see if the cluster is back up. 
We do not want to retry all 4 +nodes + +[source,csharp] +---- +await audit.TraceCalls( +new ClientCall { + { BadResponse, 9200}, + { BadResponse, 9201}, + { BadResponse, 9202}, + { BadResponse, 9203}, + { MaxRetriesReached }, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + }, +new ClientCall { + { AllNodesDead }, + { Resurrection, 9200}, + { BadResponse, 9200}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + }, + new ClientCall { + { AllNodesDead }, + { Resurrection, 9201}, + { BadResponse, 9201}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + }, + new ClientCall { + { AllNodesDead }, + { Resurrection, 9202}, + { BadResponse, 9202}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + }, + new ClientCall { + { AllNodesDead }, + { Resurrection, 9203}, + { BadResponse, 9203}, + { pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) } + } +); +---- + diff --git a/docs/asciidoc/client-concepts/connection-pooling/sticky/sticky.asciidoc b/docs/asciidoc/client-concepts/connection-pooling/sticky/sticky.asciidoc new file mode 100644 index 00000000000..30634533bb2 --- /dev/null +++ b/docs/asciidoc/client-concepts/connection-pooling/sticky/sticky.asciidoc @@ -0,0 +1,36 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[sticky]] +== Sticky + +Sticky +Each connection pool returns the first `live` node so that it is sticky between requests + +[source,csharp] +---- +var numberOfNodes = 10; +var uris = Enumerable.Range(9200, numberOfNodes).Select(p => new Uri("http://localhost:" + p)); +var pool = new StickyConnectionPool(uris); +---- + +Here we have setup a sticky connection pool seeded with 10 nodes. +So what order we expect? 
Imagine the following: + +Thread A calls GetNext and gets returned the first live node +Thread B calls GetNext() and gets returned the same node as it's still the first live. + +[source,csharp] +---- +var startingPositions = Enumerable.Range(0, numberOfNodes) + .Select(i => pool.CreateView().First()) + .Select(n => n.Uri.Port) + .ToList(); + +var expectedOrder = Enumerable.Repeat(9200, numberOfNodes); +startingPositions.Should().ContainInOrder(expectedOrder); +---- + diff --git a/docs/asciidoc/ClientConcepts/HighLevel/CovariantHits/CovariantSearchResults.doc.asciidoc b/docs/asciidoc/client-concepts/high-level/covariant-hits/covariant-search-results.asciidoc similarity index 64% rename from docs/asciidoc/ClientConcepts/HighLevel/CovariantHits/CovariantSearchResults.doc.asciidoc rename to docs/asciidoc/client-concepts/high-level/covariant-hits/covariant-search-results.asciidoc index 0ccfe32fdd1..0d14cb12ff0 100644 --- a/docs/asciidoc/ClientConcepts/HighLevel/CovariantHits/CovariantSearchResults.doc.asciidoc +++ b/docs/asciidoc/client-concepts/high-level/covariant-hits/covariant-search-results.asciidoc @@ -1,223 +1,262 @@ -# Covariant Search Results - -NEST directly supports returning covariant result sets. -Meaning a result can be typed to an interface or baseclass -but the actual instance type of the result can be that of the subclass directly - -Let look at an example, imagine we want to search over multiple types that all implement -`ISearchResult` - - - - -We have three implementations of `ISearchResult` namely `A`, `B` and `C` - - -The most straightforward way to search over multiple types is to -type the response to the parent interface or base class -and pass the actual types we want to search over using `.Types()` - -[source, csharp] ----- -var result = this._client.Search(s => s - .Type(Types.Type(typeof(A), typeof(B), typeof(C))) - .Size(100) -); ----- -Nest will translate this to a search over /index/a,b,c/_search. 
-hits that have `"_type" : "a"` will be serialized to `A` and so forth - -[source, csharp] ----- -result.IsValid.Should().BeTrue(); ----- -Here we assume our response is valid and that we received the 100 documents -we are expecting. Remember `result.Documents` is an `IEnumerable -ISearchResult -` - -[source, csharp] ----- -result.Documents.Count().Should().Be(100); ----- -To prove the returned result set is covariant we filter the documents based on their -actual type and assert the returned subsets are the expected sizes - -[source, csharp] ----- -var aDocuments = result.Documents.OfType(); ----- -[source, csharp] ----- -var bDocuments = result.Documents.OfType(); -var cDocuments = result.Documents.OfType(); -aDocuments.Count().Should().Be(25); -bDocuments.Count().Should().Be(25); -cDocuments.Count().Should().Be(50); ----- -and assume that properties that only exist on the subclass itself are properly filled - -[source, csharp] ----- -aDocuments.Should().OnlyContain(a => a.PropertyOnA > 0); ----- -[source, csharp] ----- -bDocuments.Should().OnlyContain(a => a.PropertyOnB > 0); -cDocuments.Should().OnlyContain(a => a.PropertyOnC > 0); ----- -A more low level approach is to inspect the hit yourself and determine the CLR type to deserialize to - -[source, csharp] ----- -var result = this._client.Search(s => s - .ConcreteTypeSelector((d, h) => h.Type == "a" ? typeof(A) : h.Type == "b" ? typeof(B) : typeof(C)) - .Size(100) -); ----- -here for each hit we'll call the delegate with `d` which a dynamic representation of the `_source` -and a typed `h` which represents the encapsulating hit. - -[source, csharp] ----- -result.IsValid.Should().BeTrue(); ----- -Here we assume our response is valid and that we received the 100 documents -we are expecting. 
Remember `result.Documents` is an `IEnumerable -ISearchResult -` - -[source, csharp] ----- -result.Documents.Count().Should().Be(100); ----- -To prove the returned result set is covariant we filter the documents based on their -actual type and assert the returned subsets are the expected sizes - -[source, csharp] ----- -var aDocuments = result.Documents.OfType(); ----- -[source, csharp] ----- -var bDocuments = result.Documents.OfType(); -var cDocuments = result.Documents.OfType(); -aDocuments.Count().Should().Be(25); -bDocuments.Count().Should().Be(25); -cDocuments.Count().Should().Be(50); ----- -and assume that properties that only exist on the subclass itself are properly filled - -[source, csharp] ----- -aDocuments.Should().OnlyContain(a => a.PropertyOnA > 0); ----- -[source, csharp] ----- -bDocuments.Should().OnlyContain(a => a.PropertyOnB > 0); -cDocuments.Should().OnlyContain(a => a.PropertyOnC > 0); ----- -Scroll also supports CovariantSearchResponses - - -Scroll() is a continuation of a previous Search() so Types() are lost. -You can hint the type types again using CovariantTypes() - -[source, csharp] ----- -var result = this._client.Scroll(TimeSpan.FromMinutes(60), "scrollId", s => s - .CovariantTypes(Types.Type(typeof(A), typeof(B), typeof(C))) -); ----- -Nest will translate this to a search over /index/a,b,c/_search. -hits that have `"_type" : "a"` will be serialized to `A` and so forth - -[source, csharp] ----- -result.IsValid.Should().BeTrue(); ----- -Here we assume our response is valid and that we received the 100 documents -we are expecting. 
Remember `result.Documents` is an `IEnumerable -ISearchResult -` - -[source, csharp] ----- -result.Documents.Count().Should().Be(100); ----- -To prove the returned result set is covariant we filter the documents based on their -actual type and assert the returned subsets are the expected sizes - -[source, csharp] ----- -var aDocuments = result.Documents.OfType(); ----- -[source, csharp] ----- -var bDocuments = result.Documents.OfType(); -var cDocuments = result.Documents.OfType(); -aDocuments.Count().Should().Be(25); -bDocuments.Count().Should().Be(25); -cDocuments.Count().Should().Be(50); ----- -and assume that properties that only exist on the subclass itself are properly filled - -[source, csharp] ----- -aDocuments.Should().OnlyContain(a => a.PropertyOnA > 0); ----- -[source, csharp] ----- -bDocuments.Should().OnlyContain(a => a.PropertyOnB > 0); -cDocuments.Should().OnlyContain(a => a.PropertyOnC > 0); ----- -The more low level concrete type selector can also be specified on scroll - -[source, csharp] ----- -var result = this._client.Scroll(TimeSpan.FromMinutes(1), "scrollid", s => s - .ConcreteTypeSelector((d, h) => h.Type == "a" ? typeof(A) : h.Type == "b" ? typeof(B) : typeof(C)) -); ----- -here for each hit we'll call the delegate with `d` which a dynamic representation of the `_source` -and a typed `h` which represents the encapsulating hit. - -[source, csharp] ----- -result.IsValid.Should().BeTrue(); ----- -Here we assume our response is valid and that we received the 100 documents -we are expecting. 
Remember `result.Documents` is an `IEnumerable -ISearchResult -` - -[source, csharp] ----- -result.Documents.Count().Should().Be(100); ----- -To prove the returned result set is covariant we filter the documents based on their -actual type and assert the returned subsets are the expected sizes - -[source, csharp] ----- -var aDocuments = result.Documents.OfType(); ----- -[source, csharp] ----- -var bDocuments = result.Documents.OfType(); -var cDocuments = result.Documents.OfType(); -aDocuments.Count().Should().Be(25); -bDocuments.Count().Should().Be(25); -cDocuments.Count().Should().Be(50); ----- -and assume that properties that only exist on the subclass itself are properly filled - -[source, csharp] ----- -aDocuments.Should().OnlyContain(a => a.PropertyOnA > 0); ----- -[source, csharp] ----- -bDocuments.Should().OnlyContain(a => a.PropertyOnB > 0); -cDocuments.Should().OnlyContain(a => a.PropertyOnC > 0); ----- +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[covariant-search-results]] +== Covariant Search Results + +NEST directly supports returning covariant result sets. 
+Meaning a result can be typed to an interface or base class +but the actual instance type of the result can be that of the subclass directly + +Let's look at an example; Imagine we want to search over multiple types that all implement `ISearchResult` + +[source,csharp] +---- +public interface ISearchResult +{ + string Name { get; set; } +} +---- + +We have three implementations of `ISearchResult` namely `A`, `B` and `C` + +[source,csharp] +---- +public class A : ISearchResult +{ + public string Name { get; set; } + public int PropertyOnA { get; set; } +} + +public class B : ISearchResult +{ + public string Name { get; set; } + public int PropertyOnB { get; set; } +} + +public class C : ISearchResult +{ + public string Name { get; set; } + public int PropertyOnC { get; set; } +} +---- + +=== Using Types + +The most straightforward way to search over multiple types is to +type the response to the parent interface or base class +and pass the actual types we want to search over using `.Type()` + +[source,csharp] +---- +var result = this._client.Search(s => s + .Type(Types.Type(typeof(A), typeof(B), typeof(C))) + .Size(100) +); +---- + +NEST will translate this to a search over `/index/a,b,c/_search`; +hits that have `"_type" : "a"` will be serialized to `A` and so forth + +Here we assume our response is valid and that we received the 100 documents +we are expecting. 
Remember `result.Documents` is an `IEnumerable` + +[source,csharp] +---- +result.IsValid.Should().BeTrue(); + +result.Documents.Count().Should().Be(100); +---- + +To prove the returned result set is covariant we filter the documents based on their +actual type and assert the returned subsets are the expected sizes + +[source,csharp] +---- +var aDocuments = result.Documents.OfType(); + +var bDocuments = result.Documents.OfType(); +var cDocuments = result.Documents.OfType(); +aDocuments.Count().Should().Be(25); +bDocuments.Count().Should().Be(25); +cDocuments.Count().Should().Be(50); +---- + +and assume that properties that only exist on the subclass itself are properly filled + +[source,csharp] +---- +aDocuments.Should().OnlyContain(a => a.PropertyOnA > 0); + +bDocuments.Should().OnlyContain(a => a.PropertyOnB > 0); +cDocuments.Should().OnlyContain(a => a.PropertyOnC > 0); +---- + +=== Using ConcreteTypeSelector + +A more low level approach is to inspect the hit yourself and determine the CLR type to deserialize to + +[source,csharp] +---- +var result = this._client.Search(s => s + .ConcreteTypeSelector((d, h) => h.Type == "a" ? typeof(A) : h.Type == "b" ? typeof(B) : typeof(C)) + .Size(100) +); +---- + +here for each hit we'll call the delegate passed to `ConcreteTypeSelector` where + +* `d` is a representation of the `_source` exposed as a `dynamic` type + +* a typed `h` which represents the encapsulating hit of the source i.e. `Hit` + +Here we assume our response is valid and that we received the 100 documents +we are expecting.
Remember `result.Documents` is an `IEnumerable` + +[source,csharp] +---- +result.IsValid.Should().BeTrue(); + +result.Documents.Count().Should().Be(100); +---- + +To prove the returned result set is covariant we filter the documents based on their +actual type and assert the returned subsets are the expected sizes + +[source,csharp] +---- +var aDocuments = result.Documents.OfType(); + +var bDocuments = result.Documents.OfType(); + +var cDocuments = result.Documents.OfType(); + +aDocuments.Count().Should().Be(25); + +bDocuments.Count().Should().Be(25); + +cDocuments.Count().Should().Be(50); +---- + +and assume that properties that only exist on the subclass itself are properly filled + +[source,csharp] +---- +aDocuments.Should().OnlyContain(a => a.PropertyOnA > 0); + +bDocuments.Should().OnlyContain(a => a.PropertyOnB > 0); + +cDocuments.Should().OnlyContain(a => a.PropertyOnC > 0); +---- + +=== Using CovariantTypes() + +The Scroll API is a continuation of the previous Search example so Types() are lost. +You can hint at the types using `.CovariantTypes()` + +[source,csharp] +---- +var result = this._client.Scroll(TimeSpan.FromMinutes(60), "scrollId", s => s + .CovariantTypes(Types.Type(typeof(A), typeof(B), typeof(C))) +); +---- + +NEST will translate this to a search over `/index/a,b,c/_search`; +hits that have `"_type" : "a"` will be serialized to `A` and so forth + +Here we assume our response is valid and that we received the 100 documents +we are expecting. 
Remember `result.Documents` is an `IEnumerable` + +[source,csharp] +---- +result.IsValid.Should().BeTrue(); + +result.Documents.Count().Should().Be(100); +---- + +To prove the returned result set is covariant we filter the documents based on their +actual type and assert the returned subsets are the expected sizes + +[source,csharp] +---- +var aDocuments = result.Documents.OfType(); + +var bDocuments = result.Documents.OfType(); + +var cDocuments = result.Documents.OfType(); + +aDocuments.Count().Should().Be(25); + +bDocuments.Count().Should().Be(25); + +cDocuments.Count().Should().Be(50); +---- + +and assume that properties that only exist on the subclass itself are properly filled + +[source,csharp] +---- +aDocuments.Should().OnlyContain(a => a.PropertyOnA > 0); + +bDocuments.Should().OnlyContain(a => a.PropertyOnB > 0); + +cDocuments.Should().OnlyContain(a => a.PropertyOnC > 0); +---- + +The more low level concrete type selector can also be specified on scroll + +[source,csharp] +---- +var result = this._client.Scroll(TimeSpan.FromMinutes(1), "scrollid", s => s + .ConcreteTypeSelector((d, h) => h.Type == "a" ? typeof(A) : h.Type == "b" ? typeof(B) : typeof(C)) +); +---- + +As before, within the delegate passed to `.ConcreteTypeSelector` + +* `d` is the `_source` typed as `dynamic` + +* `h` is the encapsulating typed hit + +Here we assume our response is valid and that we received the 100 documents +we are expecting. 
Remember `result.Documents` is an `IEnumerable` + +[source,csharp] +---- +result.IsValid.Should().BeTrue(); + +result.Documents.Count().Should().Be(100); +---- + +To prove the returned result set is covariant we filter the documents based on their +actual type and assert the returned subsets are the expected sizes + +[source,csharp] +---- +var aDocuments = result.Documents.OfType(); + +var bDocuments = result.Documents.OfType(); + +var cDocuments = result.Documents.OfType(); + +aDocuments.Count().Should().Be(25); + +bDocuments.Count().Should().Be(25); + +cDocuments.Count().Should().Be(50); +---- + +and assume that properties that only exist on the subclass itself are properly filled + +[source,csharp] +---- +aDocuments.Should().OnlyContain(a => a.PropertyOnA > 0); + +bDocuments.Should().OnlyContain(a => a.PropertyOnB > 0); + +cDocuments.Should().OnlyContain(a => a.PropertyOnC > 0); +---- + diff --git a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/DocumentPaths.doc.asciidoc b/docs/asciidoc/client-concepts/high-level/inference/document-paths.asciidoc similarity index 61% rename from docs/asciidoc/ClientConcepts/HighLevel/Inferrence/DocumentPaths.doc.asciidoc rename to docs/asciidoc/client-concepts/high-level/inference/document-paths.asciidoc index f493e031c5e..16fa15261b9 100644 --- a/docs/asciidoc/ClientConcepts/HighLevel/Inferrence/DocumentPaths.doc.asciidoc +++ b/docs/asciidoc/client-concepts/high-level/inference/document-paths.asciidoc @@ -1,105 +1,125 @@ -# DocumentPaths -Many API's in elasticsearch describe a path to a document. 
In NEST besides generating a constructor that takes -and Index, Type and Id seperately we also generate a constructor taking a DocumentPath that allows you to describe the path -to your document more succintly +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net +:nuget: https://www.nuget.org/packages -Manually newing +[[document-paths]] +== Document Paths + +Many API's in Elasticsearch describe a path to a document. In NEST, besides generating a constructor that takes +and Index, Type and Id seperately, we also generate a constructor taking a `DocumentPath` that allows you to describe the path +to your document more succintly + +=== Creating new instances here we create a new document path based on Project with the id 1 -[source, csharp] +[source,csharp] ---- IDocumentPath path = new DocumentPath(1); ----- -[source, csharp] ----- + Expect("project").WhenSerializing(path.Index); Expect("project").WhenSerializing(path.Type); Expect(1).WhenSerializing(path.Id); ---- + You can still override the inferred index and type name -[source, csharp] +[source,csharp] ---- path = new DocumentPath(1).Type("project1"); ----- -[source, csharp] ----- + Expect("project1").WhenSerializing(path.Type); path = new DocumentPath(1).Index("project1"); Expect("project1").WhenSerializing(path.Index); ---- -there is also a static way to describe such paths -[source, csharp] +and there is also a static way to describe such paths + +[source,csharp] ---- path = DocumentPath.Id(1); ----- -[source, csharp] ----- + Expect("project").WhenSerializing(path.Index); Expect("project").WhenSerializing(path.Type); Expect(1).WhenSerializing(path.Id); -var project = new Project { Name = "hello-world" }; ---- -here we create a new document path based on a Project -[source, csharp] +=== Creating from a document type instance + +if you have an instance of your document you can use it as well generate document paths + 
+[source,csharp] ---- -IDocumentPath path = new DocumentPath(project); +var project = new Project { Name = "hello-world" }; ---- -[source, csharp] + +here we create a new document path based on the instance of `Project`, project + +[source,csharp] ---- +IDocumentPath path = new DocumentPath(project); + Expect("project").WhenSerializing(path.Index); + Expect("project").WhenSerializing(path.Type); + Expect("hello-world").WhenSerializing(path.Id); ---- + You can still override the inferred index and type name -[source, csharp] +[source,csharp] ---- path = new DocumentPath(project).Type("project1"); ----- -[source, csharp] ----- + Expect("project1").WhenSerializing(path.Type); + path = new DocumentPath(project).Index("project1"); + Expect("project1").WhenSerializing(path.Index); ---- -there is also a static way to describe such paths -[source, csharp] +and again, there is also a static way to describe such paths + +[source,csharp] ---- path = DocumentPath.Id(project); ----- -[source, csharp] ----- + Expect("project").WhenSerializing(path.Index); + Expect("project").WhenSerializing(path.Type); + Expect("hello-world").WhenSerializing(path.Id); + DocumentPath p = project; -var project = new Project { Name = "hello-world" }; ---- -Here we can see and example how DocumentPath helps your describe your requests more tersely -[source, csharp] +=== An example with requests + +[source,csharp] ---- -var request = new IndexRequest(2) { Document = project }; +var project = new Project { Name = "hello-world" }; ---- -[source, csharp] + +we can see an example of how `DocumentPath` helps your describe your requests more tersely + +[source,csharp] ---- +var request = new IndexRequest(2) { Document = project }; + request = new IndexRequest(project) { }; ---- -when comparing with the full blown constructor and passing document manually -DocumentPath -T -'s benefits become apparent. 
-[source, csharp] +when comparing with the full blown constructor and passing document manually, +`DocumentPath`'s benefits become apparent. + +[source,csharp] ---- request = new IndexRequest(IndexName.From(), TypeName.From(), 2) { - Document = project + Document = project }; ---- + diff --git a/docs/asciidoc/client-concepts/high-level/inference/features-inference.asciidoc b/docs/asciidoc/client-concepts/high-level/inference/features-inference.asciidoc new file mode 100644 index 00000000000..cffbe0757a4 --- /dev/null +++ b/docs/asciidoc/client-concepts/high-level/inference/features-inference.asciidoc @@ -0,0 +1,33 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[features-inference]] +== Features Inference + +Some URIs in Elasticsearch take a `Feature` enum. +Within NEST, route values on the URI are represented as classes that implement an interface, `IUrlParameter`. +Since enums _cannot_ implement interfaces in C#, a route parameter that would be of type `Feature` is represented using the `Features` class that +the `Feature` enum implicitly converts to. +
+=== Constructor + +Using the `Features` constructor directly is possible but rather involved + +[source,csharp] +---- +Features fieldString = Feature.Mappings | Feature.Aliases; +Expect("_mappings,_aliases") + .WhenSerializing(fieldString); +---- + +Here we create a new GET index Elasticsearch request which takes Indices and Features. +Notice how we can use the Feature enum directly.
+ +[source,csharp] +---- +var request = new GetIndexRequest(All, Feature.Settings); +---- + diff --git a/docs/asciidoc/client-concepts/high-level/inference/field-inference.asciidoc b/docs/asciidoc/client-concepts/high-level/inference/field-inference.asciidoc new file mode 100644 index 00000000000..c76b6a0b5c1 --- /dev/null +++ b/docs/asciidoc/client-concepts/high-level/inference/field-inference.asciidoc @@ -0,0 +1,567 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[field-inference]] +== Field Inference + +Several places in the Elasticsearch API expect the path to a field from your original source document as a string. +NEST allows you to use C# expressions to strongly type these field path strings. + +These expressions are assigned to a type called `Field` and there are several ways to create an instance of one + +=== Constructor + +Using the constructor directly is possible _but_ rather involved + +[source,csharp] +---- +var fieldString = new Field("name"); +var fieldProperty = new Field(typeof(Project).GetProperty(nameof(Project.Name))); +Expression> expression = p => p.Name; +var fieldExpression = new Field(expression); +Expect("name") + .WhenSerializing(fieldExpression) + .WhenSerializing(fieldString) + .WhenSerializing(fieldProperty); +---- + +When using the constructor and passing a value for `Name`, `Property` or `Expression`, `ComparisonValue` is also set on the `Field` instance; this is used when + +* determining `Field` equality + +* getting the hash code for a `Field` instance + +[source,csharp] +---- +var fieldStringWithBoostTwo = new Field("name^2"); + +var fieldStringWithBoostThree = new Field("name^3"); + +Expression> expression = p => p.Name; + +var fieldExpression = new Field(expression); + +var fieldProperty = new Field(typeof(Project).GetProperty(nameof(Project.Name))); + 
+fieldStringWithBoostTwo.GetHashCode().Should().NotBe(0); + +fieldStringWithBoostThree.GetHashCode().Should().NotBe(0); + +fieldExpression.GetHashCode().Should().NotBe(0); + +fieldProperty.GetHashCode().Should().NotBe(0); + +fieldStringWithBoostTwo.Should().Be(fieldStringWithBoostThree); <1> +---- +<1> <> + +=== Implicit Conversion + +As you can see from the previous examples, using the constructor is rather involved and cumbersome. +Because of this, you can also implicitly convert strings and expressions to a `Field` + +[source,csharp] +---- +Field fieldString = "name"; +---- + +but for expressions this is _still_ rather involved + +[source,csharp] +---- +Expression> expression = p => p.Name; + +Field fieldExpression = expression; + +Expect("name") + .WhenSerializing(fieldExpression) + .WhenSerializing(fieldString); +---- + +[[field-name-with-boost]] +=== Field Names with Boost + +When specifying a `Field` name, the name can include a boost value; NEST will split the name and boost +value and set the `Boost` property + +[source,csharp] +---- +Field fieldString = "name^2"; + +Field fieldStringConstructor = new Field("name^2"); + +Field fieldStringCreate = new Field("name^2", 3); <1> + +fieldString.Name.Should().Be("name"); + +fieldStringConstructor.Name.Should().Be("name"); + +fieldStringCreate.Name.Should().Be("name"); + +fieldString.Boost.Should().Be(2); + +fieldStringConstructor.Boost.Should().Be(2); + +fieldStringCreate.Boost.Should().Be(2); +---- +<1> NEST will take the boost from the name + +[[nest-infer]] +=== Using Nest.Infer + +To ease creating a `Field` instance from expressions, there is a static `Infer` class you can use + +[source,csharp] +---- +Field fieldString = "name"; +---- + +but for expressions this is still rather involved + +[source,csharp] +---- +var fieldExpression = Infer.Field(p => p.Name); +---- + +this can be even shortened even further using a https://msdn.microsoft.com/en-us/library/sf0df423.aspx#Anchor_0[static import in C# 6] i.e. 
+ `using static Nest.Infer;` + +[source,csharp] +---- +fieldExpression = Field(p => p.Name); +---- + +Now that is much terser than our first example using the constructor! + +[source,csharp] +---- +Expect("name") + .WhenSerializing(fieldString) + .WhenSerializing(fieldExpression); +---- + +You can specify boosts in the field using a string + +[source,csharp] +---- +fieldString = "name^2.1"; + +fieldString.Boost.Should().Be(2.1); +---- + +As well as using `Nest.Infer.Field` + +[source,csharp] +---- +fieldExpression = Field(p => p.Name, 2.1); + +Expect("name^2.1") + .WhenSerializing(fieldString) + .WhenSerializing(fieldExpression); +---- + +[[camel-casing]] +=== Field name casing + +By default, NEST will camel-case **all** field names to better align with typical +javascript/json conventions + +using `DefaultFieldNameInferrer()` on ConnectionSettings you can change this behavior + +[source,csharp] +---- +var setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p.ToUpper())); + +setup.Expect("NAME").WhenSerializing(Field(p => p.Name)); +---- + +However `string` types are *always* passed along verbatim + +[source,csharp] +---- +setup.Expect("NaMe").WhenSerializing("NaMe"); +---- + +if you want the same behavior for expressions, simply pass a Func to `DefaultFieldNameInferrer` +to make no changes to the name + +[source,csharp] +---- +setup = WithConnectionSettings(s => s.DefaultFieldNameInferrer(p => p)); + +setup.Expect("Name").WhenSerializing(Field(p => p.Name)); +---- + +=== Complex field name expressions + +You can follow your property expression to any depth. 
Here we are traversing to the `LeadDeveloper` `FirstName` + +[source,csharp] +---- +Expect("leadDeveloper.firstName").WhenSerializing(Field(p => p.LeadDeveloper.FirstName)); +---- + +When dealing with collection indexers, the indexer access is ignored allowing you to traverse into properties of collections + +[source,csharp] +---- +Expect("curatedTags").WhenSerializing(Field(p => p.CuratedTags[0])); +---- + +Similarly, LINQ's `.First()` method also works + +[source,csharp] +---- +Expect("curatedTags").WhenSerializing(Field(p => p.CuratedTags.First())); + +Expect("curatedTags.added").WhenSerializing(Field(p => p.CuratedTags[0].Added)); + +Expect("curatedTags.name").WhenSerializing(Field(p => p.CuratedTags.First().Name)); +---- + +NOTE: Remember, these are _expressions_ and not actual code that will be executed + +An indexer on a dictionary is assumed to describe a property name + +[source,csharp] +---- +Expect("metadata.hardcoded").WhenSerializing(Field(p => p.Metadata["hardcoded"])); + +Expect("metadata.hardcoded.created").WhenSerializing(Field(p => p.Metadata["hardcoded"].Created)); +---- + +A cool feature here is that we'll evaluate variables passed to an indexer + +[source,csharp] +---- +var variable = "var"; + +Expect("metadata.var").WhenSerializing(Field(p => p.Metadata[variable])); + +Expect("metadata.var.created").WhenSerializing(Field(p => p.Metadata[variable].Created)); +---- + +If you are using Elasticearch's {ref_current}/_multi_fields.html[multi_fields], which you really should as they allow +you to analyze a string in a number of different ways, these __"virtual"__ sub fields +do not always map back on to your POCO. 
By calling `.Suffix()` on expressions, you describe the sub fields that +should be mapped and <> + +[source,csharp] +---- +Expect("leadDeveloper.firstName.raw").WhenSerializing( + Field(p => p.LeadDeveloper.FirstName.Suffix("raw"))); + +Expect("curatedTags.raw").WhenSerializing( + Field(p => p.CuratedTags[0].Suffix("raw"))); + +Expect("curatedTags.raw").WhenSerializing( + Field(p => p.CuratedTags.First().Suffix("raw"))); + +Expect("curatedTags.added.raw").WhenSerializing( + Field(p => p.CuratedTags[0].Added.Suffix("raw"))); + +Expect("metadata.hardcoded.raw").WhenSerializing( + Field(p => p.Metadata["hardcoded"].Suffix("raw"))); + +Expect("metadata.hardcoded.created.raw").WhenSerializing( + Field(p => p.Metadata["hardcoded"].Created.Suffix("raw"))); +---- + +You can even chain `.Suffix()` calls to any depth! + +[source,csharp] +---- +Expect("curatedTags.name.raw.evendeeper").WhenSerializing( + Field(p => p.CuratedTags.First().Name.Suffix("raw").Suffix("evendeeper"))); +---- + +Variables passed to suffix will be evaluated as well + +[source,csharp] +---- +var suffix = "unanalyzed"; + +Expect("metadata.var.unanalyzed").WhenSerializing( + Field(p => p.Metadata[variable].Suffix(suffix))); + +Expect("metadata.var.created.unanalyzed").WhenSerializing( + Field(p => p.Metadata[variable].Created.Suffix(suffix))); +---- + +Suffixes can also be appended to expressions using `.AppendSuffix()`. This is useful in cases where you want to apply the same suffix +to a list of fields. 
+ +Here we have a list of expressions + +[source,csharp] +---- +var expressions = new List>> +{ + p => p.Name, + p => p.Description, + p => p.CuratedTags.First().Name, + p => p.LeadDeveloper.FirstName +}; +---- + +and we want to append the suffix "raw" to each + +[source,csharp] +---- +var fieldExpressions = + expressions.Select>, Field>(e => e.AppendSuffix("raw")).ToList(); + +Expect("name.raw").WhenSerializing(fieldExpressions[0]); + +Expect("description.raw").WhenSerializing(fieldExpressions[1]); + +Expect("curatedTags.name.raw").WhenSerializing(fieldExpressions[2]); + +Expect("leadDeveloper.firstName.raw").WhenSerializing(fieldExpressions[3]); +---- + +=== Attribute based naming + +Using NEST's property attributes you can specify a new name for the properties + +[source,csharp] +---- +public class BuiltIn +{ + [Text(Name = "naam")] + public string Name { get; set; } +} +---- + +[source,csharp] +---- +Expect("naam").WhenSerializing(Field(p => p.Name)); +---- + +Starting with NEST 2.x, we also ask the serializer if it can resolve a property to a name. 
+Here we ask the default `JsonNetSerializer` to resolve a property name and it takes +the `JsonPropertyAttribute` into account + +[source,csharp] +---- +public class SerializerSpecific +{ + [JsonProperty("nameInJson")] + public string Name { get; set; } +} +---- + +[source,csharp] +---- +Expect("nameInJson").WhenSerializing(Field(p => p.Name)); +---- + +If both a NEST property attribute and a serializer specific attribute are present on a property, +**NEST attributes take precedence** + +[source,csharp] +---- +public class Both +{ + [Text(Name = "naam")] + [JsonProperty("nameInJson")] + public string Name { get; set; } +} +---- + +[source,csharp] +---- +Expect("naam").WhenSerializing(Field(p => p.Name)); + +Expect(new +{ + naam = "Martijn Laarman" +}).WhenSerializing(new Both { Name = "Martijn Laarman" }); +---- + +[[field-inference-caching]] +=== Field Inference Caching + +Resolution of field names is cached _per_ `ConnectionSettings` instance. To demonstrate, +take the following simple POCOs + +[source,csharp] +---- +class A { public C C { get; set; } } + +class B { public C C { get; set; } } + +class C +{ + public string Name { get; set; } +} +---- + +[source,csharp] +---- +var connectionSettings = TestClient.CreateSettings(forceInMemory: true); + +var client = new ElasticClient(connectionSettings); + +var fieldNameOnA = client.Infer.Field(Field(p => p.C.Name)); + +var fieldNameOnB = client.Infer.Field(Field(p => p.C.Name)); +---- + +Here we have to similary shaped expressions on coming from A and on from B +that will resolve to the same field name, as expected + +[source,csharp] +---- +fieldNameOnA.Should().Be("c.name"); + +fieldNameOnB.Should().Be("c.name"); +---- + +now we create a new connection settings with a re-map for `C` on class `A` to `"d"` +now when we resolve the field path for property `C` on `A`, it will be different than +for property `C` on `B` + +[source,csharp] +---- +var newConnectionSettings = TestClient.CreateSettings(forceInMemory: true, 
modifySettings: s => s + .InferMappingFor(m => m + .Rename(p => p.C, "d") + ) +); + +var newClient = new ElasticClient(newConnectionSettings); + +fieldNameOnA = newClient.Infer.Field(Field(p => p.C.Name)); + +fieldNameOnB = newClient.Infer.Field(Field(p => p.C.Name)); + +fieldNameOnA.Should().Be("d.name"); + +fieldNameOnB.Should().Be("c.name"); +---- + +however we didn't break inference on the first client instance using its separate connection settings + +[source,csharp] +---- +fieldNameOnA = client.Infer.Field(Field(p => p.C.Name)); + +fieldNameOnB = client.Infer.Field(Field(p => p.C.Name)); + +fieldNameOnA.Should().Be("c.name"); + +fieldNameOnB.Should().Be("c.name"); +---- + +[[field-inference-precedence]] +=== Inference Precedence + +To wrap up, the precedence in which field names are inferred is: + +. A hard rename of the property on connection settings using `.Rename()` + +. A NEST property mapping + +. Ask the serializer if the property has a verbatim value e.g. it has an explicit JsonProperty attribute. + +. Pass the MemberInfo's Name to the DefaultFieldNameInferrer which by default camelCases + +The following example class will demonstrate this precedence + +[source,csharp] +---- +class Precedence +{ + [Text(Name = "renamedIgnoresNest")] + [JsonProperty("renamedIgnoresJsonProperty")] + public string RenamedOnConnectionSettings { get; set; } <1> + + [Text(Name = "nestAtt")] + [JsonProperty("jsonProp")] + public string NestAttribute { get; set; } <2> + + [JsonProperty("jsonProp")] + public string JsonProperty { get; set; } <3> + + [JsonProperty("dontaskme")] + public string AskSerializer { get; set; } <4> + + public string DefaultFieldNameInferrer { get; set; } <5> +} +---- +<1> Even though this property has a NEST property mapping _and_ a `JsonProperty` attribute, we are going to provide a hard rename for it on ConnectionSettings later that should win. + +<2> This property has both a NEST attribute and a `JsonProperty`, NEST should win. 
+ +<3> We should take the json property into account by itself + +<4> This property we are going to special case in our custom serializer to resolve to ask + +<5> We are going to register a DefaultFieldNameInferrer on ConnectionSettings that will uppercase all properties. + +Here we create a custom serializer that renames any property named `AskSerializer` to `ask` + +[source,csharp] +---- +class CustomSerializer : JsonNetSerializer +{ + public CustomSerializer(IConnectionSettingsValues settings) : base(settings) { } + + public override IPropertyMapping CreatePropertyMapping(MemberInfo memberInfo) + { + return memberInfo.Name == nameof(Precedence.AskSerializer) + ? new PropertyMapping { Name = "ask" } + : base.CreatePropertyMapping(memberInfo); + } +} +---- + +here we provide an explicit rename of a property on `ConnectionSettings` using `.Rename()` +and all properties that are not mapped verbatim should be uppercased + +[source,csharp] +---- +var usingSettings = WithConnectionSettings(s => s + + .InferMappingFor(m => m + .Rename(p => p.RenamedOnConnectionSettings, "renamed") + ) + .DefaultFieldNameInferrer(p => p.ToUpperInvariant()) +).WithSerializer(s => new CustomSerializer(s)); + +usingSettings.Expect("renamed").ForField(Field(p => p.RenamedOnConnectionSettings)); + +usingSettings.Expect("nestAtt").ForField(Field(p => p.NestAttribute)); + +usingSettings.Expect("jsonProp").ForField(Field(p => p.JsonProperty)); + +usingSettings.Expect("ask").ForField(Field(p => p.AskSerializer)); + +usingSettings.Expect("DEFAULTFIELDNAMEINFERRER").ForField(Field(p => p.DefaultFieldNameInferrer)); +---- + +The same naming rules also apply when indexing a document + +[source,csharp] +---- +usingSettings.Expect(new [] +{ + "ask", + "DEFAULTFIELDNAMEINFERRER", + "jsonProp", + "nestAtt", + "renamed" +}).AsPropertiesOf(new Precedence +{ + RenamedOnConnectionSettings = "renamed on connection settings", + NestAttribute = "using a nest attribute", + JsonProperty = "the default serializer 
resolves json property attributes", + AskSerializer = "serializer fiddled with this one", + DefaultFieldNameInferrer = "shouting much?" +}); +---- + diff --git a/docs/asciidoc/client-concepts/high-level/inference/ids-inference.asciidoc b/docs/asciidoc/client-concepts/high-level/inference/ids-inference.asciidoc new file mode 100644 index 00000000000..ed0629c6161 --- /dev/null +++ b/docs/asciidoc/client-concepts/high-level/inference/ids-inference.asciidoc @@ -0,0 +1,139 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[ids-inference]] +== Ids Inference + +=== Implicit Conversions + +Several places in the Elasticsearch API expect an `Id` object to be passed. +This is a special box type that you can implicitly convert to from the following types + +* `Int32` + +* `Int64` + +* `String` + +* `Guid` + +Methods that take an `Id` can be passed any of these types and it will be implicitly converted to an `Id` + +[source,csharp] +---- +Id idFromInt = 1; +Id idFromLong = 2L; +Id idFromString = "hello-world"; +Id idFromGuid = new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"); +Expect(1).WhenSerializing(idFromInt); +Expect(2).WhenSerializing(idFromLong); +Expect("hello-world").WhenSerializing(idFromString); +Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenSerializing(idFromGuid); +---- + +=== Inferring from a Type + +Sometimes a method takes an object and we need an Id from that object to build up a path. +There is no implicit conversion from any object to Id but we can call `Id.From`. 
+ +Imagine your codebase has the following type that we want to index into Elasticsearch + +[source,csharp] +---- +class MyDTO +{ + public Guid Id { get; set; } + public string Name { get; set; } + public string OtherName { get; set; } +} +---- + +By default NEST will try to find a property called `Id` on the class using reflection +and create a cached fast func delegate based on the properties getter + +[source,csharp] +---- +var dto = new MyDTO +{ + Id = new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), + Name = "x", + OtherName = "y" +}; + +Expect("d70bd3cf-4e38-46f3-91ca-fcbef29b148e").WhenInferringIdOn(dto); +---- + +Using the connection settings you can specify a different property that NEST should use to infer the document Id. +Here we instruct NEST to infer the Id for `MyDTO` based on its `Name` property + +[source,csharp] +---- +WithConnectionSettings(x => x + .InferMappingFor(m => m + .IdProperty(p => p.Name) + ) +).Expect("x").WhenInferringIdOn(dto); +---- + +IMPORTANT: Inference rules are cached __per__ `ConnectionSettings` instance. 
+ +Because the cache is per `ConnectionSettings` instance, we can create another `ConnectionSettings` instance +with different inference rules + +[source,csharp] +---- +WithConnectionSettings(x => x + .InferMappingFor(m => m + .IdProperty(p => p.OtherName) + ) +).Expect("y").WhenInferringIdOn(dto); +---- + +=== Using the ElasticsearchType attribute + +Another way is to mark the type with an `ElasticsearchType` attribute, setting `IdProperty` +to the name of the property that should be used for the document id + +[source,csharp] +---- +[ElasticsearchType(IdProperty = nameof(Name))] +class MyOtherDTO +{ + public Guid Id { get; set; } + public string Name { get; set; } + public string OtherName { get; set; } +} +---- + +Now when we infer the id we expect it to be the value of the `Name` property without doing any configuration on the `ConnectionSettings` + +[source,csharp] +---- +var dto = new MyOtherDTO +{ + Id = new Guid("D70BD3CF-4E38-46F3-91CA-FCBEF29B148E"), + Name = "x", + OtherName = "y" +}; + +Expect("x").WhenInferringIdOn(dto); +---- + +=== Using Mapping inference on ConnectionSettings + +This attribute *is* cached statically/globally, however an inference rule on the `ConnectionSettings` for the type will +still win over the attribute. 
Here we demonstrate this by creating a different `ConnectionSettings` instance +that will infer the document id from the property `OtherName`: + +[source,csharp] +---- +WithConnectionSettings(x => x + .InferMappingFor(m => m + .IdProperty(p => p.OtherName) + ) +).Expect("y").WhenInferringIdOn(dto); +---- + diff --git a/docs/asciidoc/client-concepts/high-level/inference/index-name-inference.asciidoc b/docs/asciidoc/client-concepts/high-level/inference/index-name-inference.asciidoc new file mode 100644 index 00000000000..c031e5dd8fe --- /dev/null +++ b/docs/asciidoc/client-concepts/high-level/inference/index-name-inference.asciidoc @@ -0,0 +1,107 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[index-name-inference]] +== Index Name Inference + +Many endpoints within the Elasticsearch API expect to receive one or more index names +as part of the request in order to know what index/indices a request should operate on. + +NEST has a number of ways in which an index name can be specified + +=== Default Index name on ConnectionSettings + +A default index name can be specified on `ConnectionSettings` using `.DefaultIndex()`. +This is the default index name to use when no other index name can be resolved for a request + +[source,csharp] +---- +var settings = new ConnectionSettings() + .DefaultIndex("defaultindex"); +var resolver = new IndexNameResolver(settings); +var index = resolver.Resolve(); +index.Should().Be("defaultindex"); +---- + +=== Mapping an Index name for POCOs + +An index name can be mapped for CLR types using `.MapDefaultTypeIndices()` on `ConnectionSettings`. 
+ +[source,csharp] +---- +var settings = new ConnectionSettings() + .MapDefaultTypeIndices(m => m + .Add(typeof(Project), "projects") + ); + +var resolver = new IndexNameResolver(settings); + +var index = resolver.Resolve(); + +index.Should().Be("projects"); +---- + +=== Mapping an Index name for POCOs + +An index name for a POCO provided using `.MapDefaultTypeIndices()` **will take precedence** over +the default index name + +[source,csharp] +---- +var settings = new ConnectionSettings() + .DefaultIndex("defaultindex") + .MapDefaultTypeIndices(m => m + .Add(typeof(Project), "projects") + ); + +var resolver = new IndexNameResolver(settings); + +var index = resolver.Resolve(); + +index.Should().Be("projects"); +---- + +=== Explicitly specifying Index name on the request + +For API calls that expect an index name, the index name can be explicitly provided +on the request + +[source,csharp] +---- +Uri requestUri = null; + +var client = TestClient.GetInMemoryClient(s => s + .OnRequestCompleted(r => { requestUri = r.Uri; })); + +var response = client.Search(s => s.Index("some-other-index")); <1> + +requestUri.Should().NotBeNull(); + +requestUri.LocalPath.Should().StartWith("/some-other-index/"); +---- +<1> Provide the index name on the request + +When an index name is provided on a request, it **will take precedence** over the default +index name and any index name specified for the POCO type using `.MapDefaultTypeIndices()` + +[source,csharp] +---- +var client = TestClient.GetInMemoryClient(s => + new ConnectionSettings() + .DefaultIndex("defaultindex") + .MapDefaultTypeIndices(m => m + .Add(typeof(Project), "projects") + ) +); + +var response = client.Search(s => s.Index("some-other-index")); <1> + +response.ApiCall.Uri.Should().NotBeNull(); + +response.ApiCall.Uri.LocalPath.Should().StartWith("/some-other-index/"); +---- +<1> Provide the index name on the request + diff --git a/docs/asciidoc/client-concepts/high-level/inference/indices-paths.asciidoc 
b/docs/asciidoc/client-concepts/high-level/inference/indices-paths.asciidoc new file mode 100644 index 00000000000..7e9d632632b --- /dev/null +++ b/docs/asciidoc/client-concepts/high-level/inference/indices-paths.asciidoc @@ -0,0 +1,67 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[indices-paths]] +== Indices paths + +Some API's in Elasticsearch take one or many index name or a special `_all` marker to send the request to all the indices +In nest this is encoded using `Indices`. + +=== Implicit Conversion + +Several types implicitly convert to `Indices` + +[source,csharp] +---- +Nest.Indices singleIndexFromString = "name"; +Nest.Indices multipleIndicesFromString = "name1, name2"; +Nest.Indices allFromString = "_all"; +Nest.Indices allWithOthersFromString = "_all, name2"; +singleIndexFromString.Match( + all => all.Should().BeNull(), + many => many.Indices.Should().HaveCount(1).And.Contain("name") +); +multipleIndicesFromString.Match( + all => all.Should().BeNull(), + many => many.Indices.Should().HaveCount(2).And.Contain("name2") +); +allFromString.Match( + all => all.Should().NotBeNull(), + many => many.Indices.Should().BeNull() +); +allWithOthersFromString.Match( + all => all.Should().NotBeNull(), + many => many.Indices.Should().BeNull() +); +---- + +[[nest-indices]] +=== Using Nest.Indices + +To ease creating `IndexName` or `Indices` from expressions, there is a static `Nest.Indices` class you can use + +[source,csharp] +---- +var all = Nest.Indices.All; <1> + +var many = Nest.Indices.Index("name1", "name2"); <2> + +var manyTyped = Nest.Indices.Index().And(); <3> + +var singleTyped = Nest.Indices.Index(); + +var singleString = Nest.Indices.Index("name1"); + +var invalidSingleString = Nest.Indices.Index("name1, name2"); <4> +---- +<1> Using `_all` indices + +<2> specifying multiple indices using strings + +<3> speciying 
multiple using types + +<4> an **invalid** single index name + diff --git a/docs/asciidoc/client-concepts/high-level/inference/property-inference.asciidoc b/docs/asciidoc/client-concepts/high-level/inference/property-inference.asciidoc new file mode 100644 index 00000000000..e125c814e9a --- /dev/null +++ b/docs/asciidoc/client-concepts/high-level/inference/property-inference.asciidoc @@ -0,0 +1,101 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[property-inference]] +== Property Name Inference + +=== Appending suffixes to a Lambda expression body + +Suffixes can be appended to the body of a lambda expression, useful in cases where +you have a POCO property mapped as a {ref_current}/_multi_fields.html[multi_field] +and want to use strongly typed access based on the property, yet append a suffix to the +generated field name in order to access a particular `multi_field`. + +The `.Suffix()` extension method can be used for this purpose and when serializing expressions suffixed +in this way, the serialized field name resolves to the last token + +[source,csharp] +---- +Expression> expression = p => p.Name.Suffix("raw"); +Expect("raw").WhenSerializing(expression); +---- + +=== Appending suffixes to a Lambda expression + +Alternatively, suffixes can be applied to a lambda expression directly using +the `.ApplySuffix()` extension method. Again, the serialized field name +resolves to the last token + +[source,csharp] +---- +Expression> expression = p => p.Name; + +expression = expression.AppendSuffix("raw"); + +Expect("raw").WhenSerializing(expression); +---- + +=== Naming conventions + +Currently, the name of a field cannot contain a `.` in Elasticsearch due to the potential for ambiguity with +a field that is mapped as a {ref_current}/_multi_fields.html[multi_field]. 
+ +In these cases, NEST allows the call to go to Elasticsearch, deferring the naming conventions to the server side and, +in the case of a `.` in a field name, a `400 Bad Response` is returned with a server error indicating the reason + +[source,csharp] +---- +var createIndexResponse = _client.CreateIndex("random-" + Guid.NewGuid().ToString().ToLowerInvariant(), c => c + .Mappings(m => m + .Map("type-with-dot", mm => mm + .Properties(p => p + .Text(s => s + .Name("name-with.dot") + ) + ) + ) + ) +); +---- + +The response is not valid + +[source,csharp] +---- +createIndexResponse.IsValid.Should().BeFalse(); +---- + +`DebugInformation` provides an audit trail of information to help diagnose the issue + +[source,csharp] +---- +createIndexResponse.DebugInformation.Should().NotBeNullOrEmpty(); +---- + +`ServerError` contains information about the response from Elasticsearch + +[source,csharp] +---- +createIndexResponse.ServerError.Should().NotBeNull(); + +createIndexResponse.ServerError.Status.Should().Be(400); + +createIndexResponse.ServerError.Error.Should().NotBeNull(); + +createIndexResponse.ServerError.Error.RootCause.Should().NotBeNullOrEmpty(); + +var rootCause = createIndexResponse.ServerError.Error.RootCause[0]; +---- + +We can see that the underlying reason is a `.` in the field name "name-with.dot" + +[source,csharp] +---- +rootCause.Reason.Should().Be("Field name [name-with.dot] cannot contain '.'"); + +rootCause.Type.Should().Be("mapper_parsing_exception"); +---- + diff --git a/docs/asciidoc/client-concepts/high-level/mapping/auto-map.asciidoc b/docs/asciidoc/client-concepts/high-level/mapping/auto-map.asciidoc new file mode 100644 index 00000000000..8bc8b4b52e7 --- /dev/null +++ b/docs/asciidoc/client-concepts/high-level/mapping/auto-map.asciidoc @@ -0,0 +1,1150 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[auto-map]] 
+== Auto mapping properties + +When creating a mapping (either when creating an index or via the put mapping API), +NEST offers a feature called `.AutoMap()`, which will automagically infer the correct +Elasticsearch datatypes of the POCO properties you are mapping. Alternatively, if +you're using attributes to map your properties, then calling `.AutoMap()` is required +in order for your attributes to be applied. We'll look at the features of auto mapping +with a number of examples. + +For these examples, we'll define two POCOS, `Company`, which has a name +and a collection of Employees, and `Employee` which has various properties of +different types, and itself has a collection of `Employee` types. + +[source,csharp] +---- +public class Company +{ + public string Name { get; set; } + public List Employees { get; set; } +} + +public class Employee +{ + public string FirstName { get; set; } + public string LastName { get; set; } + public int Salary { get; set; } + public DateTime Birthday { get; set; } + public bool IsManager { get; set; } + public List Employees { get; set; } + public TimeSpan Hours { get; set; } +} +---- + +=== Manual mapping + +To create a mapping for our Company type, we can use the fluent API +and map each property explicitly + +[source,csharp] +---- +var descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m + .Properties(ps => ps + .Text(s => s + .Name(c => c.Name) <1> + ) + .Object(o => o <2> + .Name(c => c.Employees) + .Properties(eps => eps + .Text(s => s + .Name(e => e.FirstName) + ) + .Text(s => s + .Name(e => e.LastName) + ) + .Number(n => n + .Name(e => e.Salary) + .Type(NumberType.Integer) + ) + ) + ) + ) + ) + ); +---- +<1> map `Name` as a `string` type + +<2> map `Employees` as an `object` type, mapping each of the properties of `Employee` + +This is all fine and dandy and useful for some use cases however in most cases +this can become verbose and wieldy. 
The majority of the time you simply just want to map *all* +the properties of a POCO in a single go. + +[source,csharp] +---- +var expected = new +{ + mappings = new + { + company = new + { + properties = new + { + name = new + { + type = "text" + }, + employees = new + { + type = "object", + properties = new + { + firstName = new + { + type = "text" + }, + lastName = new + { + type = "text" + }, + salary = new + { + type = "integer" + } + } + } + } + } + } +}; + +Expect(expected).WhenSerializing((ICreateIndexRequest)descriptor); +---- + +=== Simple Automapping + +This is exactly where `.AutoMap()` becomes useful. Instead of manually mapping each property, +explicitly, we can instead call `.AutoMap()` for each of our mappings and let NEST do all the work + +[source,csharp] +---- +var descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m.AutoMap()) + .Map(m => m.AutoMap()) + ); +---- + +Observe that NEST has inferred the Elasticsearch types based on the CLR type of our POCO properties. 
+In this example, + +* Birthday was mapped as a `date`, + +* Hours was mapped as a `long` (ticks) + +* IsManager was mapped as a `bool`, + +* Salary as an `integer` + +* Employees as an `object` + +and the remaining string properties as `string` types + +[source,csharp] +---- +var expected = new +{ + mappings = new + { + company = new + { + properties = new + { + employees = new + { + properties = new + { + birthday = new + { + type = "date" + }, + employees = new + { + properties = new { }, + type = "object" + }, + firstName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + hours = new + { + type = "long" + }, + isManager = new + { + type = "boolean" + }, + lastName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + salary = new + { + type = "integer" + } + }, + type = "object" + }, + name = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + } + } + }, + employee = new + { + properties = new + { + birthday = new + { + type = "date" + }, + employees = new + { + properties = new { }, + type = "object" + }, + firstName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + hours = new + { + type = "long" + }, + isManager = new + { + type = "boolean" + }, + lastName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + salary = new + { + type = "integer" + } + } + } + } +}; + +Expect(expected).WhenSerializing((ICreateIndexRequest)descriptor); +---- + +[[auto-mapping-with-overrides]] +[float] +== Auto mapping with overrides + +In most cases, you'll want to map more than just the vanilla datatypes and also provide +various options for your properties (analyzer to use, whether to enable doc_values, etc...). +In that case, it's possible to use `.AutoMap()` in conjuction with explicitly mapped properties. 
+ +Here we are using `.AutoMap()` to automatically map our company type, but then we're +overriding our employee property and making it a `nested` type, since by default, `.AutoMap()` will infer objects as `object`. + +[source,csharp] +---- +var descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m + .AutoMap() + .Properties(ps => ps + .Nested(n => n + .Name(c => c.Employees) + ) + ) + ) + ); + +var expected = new +{ + mappings = new + { + company = new + { + properties = new + { + name = new + { + type = "text", + fields = new + { + keyword = new + { + type = "keyword" + } + } + }, + employees = new + { + type = "nested", + } + } + } + } +}; + +Expect(expected).WhenSerializing((ICreateIndexRequest)descriptor); +---- + +`.AutoMap()` is idempotent; calling it before or after manually +mapped properties will still yield the same results. + +[source,csharp] +---- +descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m + .Properties(ps => ps + .Nested(n => n + .Name(c => c.Employees) + ) + ) + .AutoMap() + ) + ); + +Expect(expected).WhenSerializing((ICreateIndexRequest)descriptor); +---- + +[[attribute-mapping]] +[float] +== Attribute mapping + +It is also possible to define your mappings using attributes on your POCOs. When you +use attributes, you *must* use `.AutoMap()` in order for the attributes to be applied. +Here we define the same two types as before, but this time using attributes to define the mappings. + +[source,csharp] +---- +[ElasticsearchType(Name = "company")] +public class CompanyWithAttributes +{ + [Keyword(NullValue = "null", Similarity = SimilarityOption.BM25)] + public string Name { get; set; } + + [Text(Name = "office_hours")] + public TimeSpan? 
HeadOfficeHours { get; set; } + + [Object(Path = "employees", Store = false)] + public List Employees { get; set; } +} + +[ElasticsearchType(Name = "employee")] +public class EmployeeWithAttributes +{ + [Text(Name = "first_name")] + public string FirstName { get; set; } + + [Text(Name = "last_name")] + public string LastName { get; set; } + + [Number(DocValues = false, IgnoreMalformed = true, Coerce = true)] + public int Salary { get; set; } + + [Date(Format = "MMddyyyy", NumericResolution = NumericResolutionUnit.Seconds)] + public DateTime Birthday { get; set; } + + [Boolean(NullValue = false, Store = true)] + public bool IsManager { get; set; } + + [Nested(Path = "employees")] + [JsonProperty("empl")] + public List Employees { get; set; } +} +---- + +Then we map the types by calling `.AutoMap()` + +[source,csharp] +---- +var descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m.AutoMap()) + .Map(m => m.AutoMap()) + ); + +var expected = new +{ + mappings = new + { + company = new + { + properties = new + { + employees = new + { + path = "employees", + properties = new + { + birthday = new + { + type = "date" + }, + employees = new + { + properties = new { }, + type = "object" + }, + firstName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + hours = new + { + type = "long" + }, + isManager = new + { + type = "boolean" + }, + lastName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + salary = new + { + type = "integer" + } + }, + store = false, + type = "object" + }, + name = new + { + null_value = "null", + similarity = "BM25", + type = "keyword" + }, + office_hours = new + { + type = "text" + } + } + }, + employee = new + { + properties = new + { + birthday = new + { + format = "MMddyyyy", + numeric_resolution = "seconds", + type = "date" + }, + empl = new + { + path = "employees", + properties = new + { + birthday = new + { + type = 
"date" + }, + employees = new + { + properties = new { }, + type = "object" + }, + firstName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + hours = new + { + type = "long" + }, + isManager = new + { + type = "boolean" + }, + lastName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + salary = new + { + type = "integer" + } + }, + type = "nested" + }, + first_name = new + { + type = "text" + }, + isManager = new + { + null_value = false, + store = true, + type = "boolean" + }, + last_name = new + { + type = "text" + }, + salary = new + { + coerce = true, + doc_values = false, + ignore_malformed = true, + type = "double" + } + } + } + } +}; + +Expect(expected).WhenSerializing(descriptor as ICreateIndexRequest); +---- + +Just as we were able to override the inferred properties in our earlier example, explicit (manual) +mappings also take precedence over attributes. Therefore we can also override any mappings applied +via any attributes defined on the POCO + +[source,csharp] +---- +var descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m + .AutoMap() + .Properties(ps => ps + .Nested(n => n + .Name(c => c.Employees) + ) + ) + ) + .Map(m => m + .AutoMap() + .TtlField(ttl => ttl + .Enable() + .Default("10m") + ) + .Properties(ps => ps + .Text(s => s + .Name(e => e.FirstName) + .Fields(fs => fs + .Keyword(ss => ss + .Name("firstNameRaw") + ) + .TokenCount(t => t + .Name("length") + .Analyzer("standard") + ) + ) + ) + .Number(n => n + .Name(e => e.Salary) + .Type(NumberType.Double) + .IgnoreMalformed(false) + ) + .Date(d => d + .Name(e => e.Birthday) + .Format("MM-dd-yy") + ) + ) + ) + ); + +var expected = new +{ + mappings = new + { + company = new + { + properties = new + { + employees = new + { + type = "nested" + }, + name = new + { + null_value = "null", + similarity = "BM25", + type = "keyword" + }, + office_hours = new + { + type = 
"text" + } + } + }, + employee = new + { + _ttl = new + { + @default = "10m", + enabled = true + }, + properties = new + { + birthday = new + { + format = "MM-dd-yy", + type = "date" + }, + empl = new + { + path = "employees", + properties = new + { + birthday = new + { + type = "date" + }, + employees = new + { + properties = new { }, + type = "object" + }, + firstName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + hours = new + { + type = "long" + }, + isManager = new + { + type = "boolean" + }, + lastName = new + { + fields = new + { + keyword = new + { + type = "keyword" + } + }, + type = "text" + }, + salary = new + { + type = "integer" + } + }, + type = "nested" + }, + first_name = new + { + fields = new + { + firstNameRaw = new + { + type = "keyword" + }, + length = new + { + analyzer = "standard", + type = "token_count" + } + }, + type = "text" + }, + isManager = new + { + null_value = false, + store = true, + type = "boolean" + }, + last_name = new + { + type = "text" + }, + salary = new + { + ignore_malformed = false, + type = "double" + } + } + } + } +}; + +Expect(expected).WhenSerializing((ICreateIndexRequest)descriptor); +---- + +[[ignoring-properties]] +[float] +== Ignoring Properties + +Properties on a POCO can be ignored in a few ways: + +* Using the `Ignore` property on a derived `ElasticsearchPropertyAttribute` type applied to the property that should be ignored on the POCO + +* Using the `.InferMappingFor(Func, IClrTypeMapping> selector)` on the connection settings + +* Using an ignore attribute applied to the POCO property that is understood by the `IElasticsearchSerializer` used, and inspected inside of the `CreatePropertyMapping()` on the serializer. 
In the case of the default `JsonNetSerializer`, this is the Json.NET `JsonIgnoreAttribute` + +This example demonstrates all ways, using the `Ignore` property on the attribute to ignore the property `PropertyToIgnore`, the infer mapping to ignore the +property `AnotherPropertyToIgnore` and the json serializer specific attribute to ignore the property `JsonIgnoredProperty` + +[source,csharp] +---- +[ElasticsearchType(Name = "company")] +public class CompanyWithAttributesAndPropertiesToIgnore +{ + public string Name { get; set; } + + [Text(Ignore = true)] + public string PropertyToIgnore { get; set; } + + public string AnotherPropertyToIgnore { get; set; } + + [JsonIgnore] + public string JsonIgnoredProperty { get; set; } +} +---- + +All of the properties except `Name` have been ignored in the mapping + +[source,csharp] +---- +var descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m + .AutoMap() + ) + ); + +var expected = new +{ + mappings = new + { + company = new + { + properties = new + { + name = new + { + type = "text", + fields = new + { + keyword = new + { + type = "keyword" + } + } + } + } + } + } +}; + +var settings = WithConnectionSettings(s => s + .InferMappingFor(i => i + .Ignore(p => p.AnotherPropertyToIgnore) + ) +); + +settings.Expect(expected).WhenSerializing((ICreateIndexRequest)descriptor); +---- + +[[mapping-recursion]] +[float] +== Mapping Recursion + +If you notice in our previous `Company` and `Employee` examples, the `Employee` type is recursive +in that the `Employee` class itself contains a collection of type `Employee`. By default, `.AutoMap()` will only +traverse a single depth when it encounters recursive instances like this. Hence, in the +previous examples, the collection of type `Employee` on the `Employee` class did not get any of its properties mapped. +This is done as a safe-guard to prevent stack overflows and all the fun that comes with +infinite recursion. 
Additionally, in most cases, when it comes to Elasticsearch mappings, it is +often an edge case to have deeply nested mappings like this. However, you may still have +the need to do this, so you can control the recursion depth of `.AutoMap()`. + +Let's introduce a very simple class, `A`, which itself has a property +Child of type `A`. + +[source,csharp] +---- +public class A +{ + public A Child { get; set; } +} +---- + +By default, `.AutoMap()` only goes as far as depth 1 + +[source,csharp] +---- +var descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m.AutoMap()) + ); +---- + +Thus we do not map properties on the second occurrence of our Child property + +[source,csharp] +---- +var expected = new +{ + mappings = new + { + a = new + { + properties = new + { + child = new + { + properties = new { }, + type = "object" + } + } + } + } +}; + +Expect(expected).WhenSerializing((ICreateIndexRequest)descriptor); +---- + +Now lets specify a maxRecursion of 3 + +[source,csharp] +---- +var withMaxRecursionDescriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m.AutoMap(3)) + ); +---- + +`.AutoMap()` has now mapped three levels of our Child property + +[source,csharp] +---- +var expectedWithMaxRecursion = new +{ + mappings = new + { + a = new + { + properties = new + { + child = new + { + type = "object", + properties = new + { + child = new + { + type = "object", + properties = new + { + child = new + { + type = "object", + properties = new + { + child = new + { + type = "object", + properties = new { } + } + } + } + } + } + } + } + } + } + } +}; + +Expect(expectedWithMaxRecursion).WhenSerializing((ICreateIndexRequest)withMaxRecursionDescriptor); +---- + +[source,csharp] +---- +var descriptor = new PutMappingDescriptor().AutoMap(); + +var expected = new +{ + properties = new + { + child = new + { + properties = new { }, + type = "object" + } + } +}; + +Expect(expected).WhenSerializing((IPutMappingRequest)descriptor); 
+ +var withMaxRecursionDescriptor = new PutMappingDescriptor().AutoMap(3); + +var expectedWithMaxRecursion = new +{ + properties = new + { + child = new + { + type = "object", + properties = new + { + child = new + { + type = "object", + properties = new + { + child = new + { + type = "object", + properties = new + { + child = new + { + type = "object", + properties = new { } + } + } + } + } + } + } + } + } +}; + +Expect(expectedWithMaxRecursion).WhenSerializing((IPutMappingRequest)withMaxRecursionDescriptor); +---- + +[[applying-conventions-through-the-visitor-pattern]] +[float] +== Applying conventions through the Visitor pattern + +It is also possible to apply a transformation on all or specific properties. + +`.AutoMap()` internally implements the https://en.wikipedia.org/wiki/Visitor_pattern[visitor pattern]. The default visitor, `NoopPropertyVisitor`, +does nothing and acts as a blank canvas for you to implement your own visiting methods. + +For instance, lets create a custom visitor that disables doc values for numeric and boolean types +(Not really a good idea in practice, but let's do it anyway for the sake of a clear example.) 
+ +[source,csharp] +---- +public class DisableDocValuesPropertyVisitor : NoopPropertyVisitor +{ + public override void Visit( + INumberProperty type, + PropertyInfo propertyInfo, + ElasticsearchPropertyAttributeBase attribute) <1> + { + type.DocValues = false; + } + + public override void Visit( + IBooleanProperty type, + PropertyInfo propertyInfo, + ElasticsearchPropertyAttributeBase attribute) <2> + { + type.DocValues = false; + } +} +---- +<1> Override the `Visit` method on `INumberProperty` and set `DocValues = false` + +<2> Similarily, override the `Visit` method on `IBooleanProperty` and set `DocValues = false` + +Now we can pass an instance of our custom visitor to `.AutoMap()` + +[source,csharp] +---- +var descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m.AutoMap(new DisableDocValuesPropertyVisitor())) + ); +---- + +and any time the client maps a property of the POCO (Employee in this example) as a number (INumberProperty) or boolean (IBooleanProperty), +it will apply the transformation defined in each `Visit()` call respectively, which in this example +disables {ref_current}/doc-values.html[doc_values]. + +[source,csharp] +---- +var expected = new +{ + mappings = new + { + employee = new + { + properties = new + { + birthday = new + { + type = "date" + }, + employees = new + { + properties = new { }, + type = "object" + }, + firstName = new + { + type = "string" + }, + isManager = new + { + doc_values = false, + type = "boolean" + }, + lastName = new + { + type = "string" + }, + salary = new + { + doc_values = false, + type = "integer" + } + } + } + } +}; +---- + +=== Visiting on PropertyInfo + +You can even take the visitor approach a step further, and instead of visiting on `IProperty` types, visit +directly on your POCO properties (PropertyInfo). As an example, let's create a visitor that maps all CLR types +to an Elasticsearch text datatype (ITextProperty). 
+ +[source,csharp] +---- +public class EverythingIsAStringPropertyVisitor : NoopPropertyVisitor +{ + public override IProperty Visit(PropertyInfo propertyInfo, ElasticsearchPropertyAttributeBase attribute) => new TextProperty(); +} +---- + +[source,csharp] +---- +var descriptor = new CreateIndexDescriptor("myindex") + .Mappings(ms => ms + .Map(m => m.AutoMap(new EverythingIsAStringPropertyVisitor())) + ); + +var expected = new +{ + mappings = new + { + employee = new + { + properties = new + { + birthday = new + { + type = "text" + }, + employees = new + { + type = "text" + }, + firstName = new + { + type = "text" + }, + isManager = new + { + type = "text" + }, + lastName = new + { + type = "text" + }, + salary = new + { + type = "text" + } + } + } + } +}; +---- + diff --git a/docs/asciidoc/client-concepts/low-level/class.png b/docs/asciidoc/client-concepts/low-level/class.png new file mode 100644 index 00000000000..bbc981cfe7a Binary files /dev/null and b/docs/asciidoc/client-concepts/low-level/class.png differ diff --git a/docs/asciidoc/client-concepts/low-level/connecting.asciidoc b/docs/asciidoc/client-concepts/low-level/connecting.asciidoc new file mode 100644 index 00000000000..b46e77947c7 --- /dev/null +++ b/docs/asciidoc/client-concepts/low-level/connecting.asciidoc @@ -0,0 +1,358 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[connecting]] +== Connecting + +Connecting to Elasticsearch with `Elasticsearch.Net` is quite easy and there a few options to suit a number of different use cases. 
+ +[[connection-strategies]] +=== Choosing the right Connection Strategy + +If you simply new an `ElasticLowLevelClient`, it will be a non-failover connection to `http://localhost:9200` + +[source,csharp] +---- +var client = new ElasticLowLevelClient(); +---- + +If your Elasticsearch node does not live at `http://localhost:9200` but instead lives somewhere else, for example, `http://mynode.example.com:8082/apiKey`, then +you will need to pass in some instance of `IConnectionConfigurationValues`. + +The easiest way to do this is: + +[source,csharp] +---- +var node = new Uri("http://mynode.example.com:8082/apiKey"); + +var config = new ConnectionConfiguration(node); + +var client = new ElasticLowLevelClient(config); +---- + +This will still be a non-failover connection, meaning if that `node` goes down the operation will not be retried on any other nodes in the cluster. + +To get a failover connection we have to pass an <> instance instead of a `Uri`. + +[source,csharp] +---- +var node = new Uri("http://mynode.example.com:8082/apiKey"); + +var connectionPool = new SniffingConnectionPool(new[] { node }); + +var config = new ConnectionConfiguration(connectionPool); + +var client = new ElasticLowLevelClient(config); +---- + +Here instead of directly passing `node`, we pass a <> +which will use our `node` to find out the rest of the available cluster nodes. +Be sure to read more about <>. + +=== Configuration Options + +Besides either passing a `Uri` or `IConnectionPool` to `ConnectionConfiguration`, you can also fluently control many more options. 
For instance: + +[source,csharp] +---- +var node = new Uri("http://mynode.example.com:8082/apiKey"); + +var connectionPool = new SniffingConnectionPool(new[] { node }); + +var config = new ConnectionConfiguration(connectionPool) + .DisableDirectStreaming() <1> + .BasicAuthentication("user", "pass") + .RequestTimeout(TimeSpan.FromSeconds(5)); +---- +<1> Additional options are fluent method calls on `ConnectionConfiguration` + +The following is a list of available connection configuration options: + +[source,csharp] +---- +var config = new ConnectionConfiguration() + .DisableAutomaticProxyDetection() <1> + .EnableHttpCompression() <2> + .DisableDirectStreaming(); <3> + +var client = new ElasticLowLevelClient(config); + +var result = client.Search>(new { size = 12 }); +---- +<1> Disable automatic proxy detection. When called, defaults to `true`. + +<2> Enable compressed request and responses from Elasticsearch (Note that nodes need to be configured to allow this. See the {ref_current}/modules-http.html[http module settings] for more info). + +<3> By default responses are deserialized directly from the response stream to the object you tell it to. For debugging purposes, it can be very useful to keep a copy of the raw response on the result object, which is what calling this method will do. + +`.ResponseBodyInBytes` will only have a value if the client configuration has `DisableDirectStreaming` set + +[source,csharp] +---- +var raw = result.ResponseBodyInBytes; +---- + +Please note that using `.DisableDirectStreaming` only makes sense if you need the mapped response **and** the raw response __at the same time__. 
+If you need only a `string` response simply call
+
+[source,csharp]
+----
+var stringResult = client.Search(new { });
+----
+
+and similarly, if you need only a `byte[]`
+
+[source,csharp]
+----
+var byteResult = client.Search(new { });
+----
+
+other configuration options
+
+[source,csharp]
+----
+config = config
+    .GlobalQueryStringParameters(new NameValueCollection()) <1>
+    .Proxy(new Uri("http://myproxy"), "username", "pass") <2>
+    .RequestTimeout(TimeSpan.FromSeconds(4)) <3>
+    .ThrowExceptions() <4>
+    .PrettyJson() <5>
+    .BasicAuthentication("username", "password");
+----
+<1> Allows you to set querystring parameters that have to be added to every request. For instance, if you use a hosted Elasticsearch provider, and you need to pass an `apiKey` parameter onto every request.
+
+<2> Sets proxy information on the connection.
+
+<3> [[request-timeout]] Sets the global maximum time a connection may take. Please note that this is the request timeout, the builtin .NET `WebRequest` has no way to set connection timeouts (see http://msdn.microsoft.com/en-us/library/system.net.httpwebrequest.timeout(v=vs.110).aspx[the MSDN documentation on `HttpWebRequest.Timeout` Property]).
+
+<4> As an alternative to the C/go like error checking on `response.IsValid`, you can instead tell the client to <>.
+
+<5> Forces all serialization to be indented and appends `pretty=true` to all the requests so that the responses are indented as well
+
+NOTE: Basic authentication credentials can alternatively be specified on the node URI directly:
+
+[source,csharp]
+----
+var uri = new Uri("http://username:password@localhost:9200");
+
+var settings = new ConnectionConfiguration(uri);
+----
+
+...but this may become tedious when using connection pooling with multiple nodes. 
+
+[[thrown-exceptions]]
+=== Exceptions
+
+There are three categories of exceptions that may be thrown:
+
+`ElasticsearchClientException`::
+These are known exceptions, either an exception that occurred in the request pipeline
+(such as max retries or timeout reached, bad authentication, etc...) or Elasticsearch itself returned an error (could
+not parse the request, bad query, missing field, etc...). If it is an Elasticsearch error, the `ServerError` property
+on the response will contain the actual error that was returned. The inner exception will always contain the
+root cause exception.
+
+`UnexpectedElasticsearchClientException`::
+These are unknown exceptions, for instance a response from Elasticsearch not
+properly deserialized. These are usually bugs and {github}/issues[should be reported]. This exception also inherits from `ElasticsearchClientException`
+so an additional catch block isn't necessary, but can be helpful in distinguishing between the two.
+
+Development time exceptions::
+These are CLR exceptions like `ArgumentException`, `ArgumentOutOfRangeException`, etc.
+that are thrown when an API in the client is misused.
+These should not be handled as you want to know about them during development.
+
+=== OnRequestCompleted
+
+You can pass a callback of type `Action` that can eavesdrop every time a response (good or bad) is created.
+If you have complex logging needs, this is a good place to add that in. 
+ +[source,csharp] +---- +var counter = 0; + +var client = TestClient.GetInMemoryClient(s => s.OnRequestCompleted(r => counter++)); + +client.RootNodeInfo(); + +counter.Should().Be(1); + +client.RootNodeInfoAsync(); + +counter.Should().Be(2); +---- + +`OnRequestCompleted` is called even when an exception is thrown + +[source,csharp] +---- +var counter = 0; + +var client = TestClient.GetFixedReturnClient(new { }, 500, s => s + .ThrowExceptions() + .OnRequestCompleted(r => counter++) +); + +Assert.Throws(() => client.RootNodeInfo()); + +counter.Should().Be(1); + +Assert.ThrowsAsync(() => client.RootNodeInfoAsync()); + +counter.Should().Be(2); +---- + +[[complex-logging]] +=== Complex logging with OnRequestCompleted + +Here's an example of using `OnRequestCompleted()` for complex logging. Remember, if you would also like +to capture the request and/or response bytes, you also need to set `.DisableDirectStreaming()` to `true` + +[source,csharp] +---- +var list = new List(); + +var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200")); + +var settings = new ConnectionSettings(connectionPool, new InMemoryConnection()) <1> + .DefaultIndex("default-index") + .DisableDirectStreaming() + .OnRequestCompleted(response => + { + // log out the request and the request body, if one exists for the type of request + if (response.RequestBodyInBytes != null) + { + list.Add( + $"{response.HttpMethod} {response.Uri} \n" + + $"{Encoding.UTF8.GetString(response.RequestBodyInBytes)}"); + } + else + { + list.Add($"{response.HttpMethod} {response.Uri}"); + } + + // log out the response and the response body, if one exists for the type of response + if (response.ResponseBodyInBytes != null) + { + list.Add($"Status: {response.HttpStatusCode}\n" + + $"{Encoding.UTF8.GetString(response.ResponseBodyInBytes)}\n" + + $"{new string('-', 30)}\n"); + } + else + { + list.Add($"Status: {response.HttpStatusCode}\n" + + $"{new string('-', 30)}\n"); + } + }); + +var client = new 
ElasticClient(settings); + +var syncResponse = client.Search(s => s + .AllTypes() + .AllIndices() + .Scroll("2m") + .Sort(ss => ss + .Ascending(SortSpecialField.DocumentIndexOrder) + ) +); + +list.Count.Should().Be(2); + +var asyncResponse = await client.SearchAsync(s => s + .AllTypes() + .AllIndices() + .Scroll("2m") + .Sort(ss => ss + .Ascending(SortSpecialField.DocumentIndexOrder) + ) +); + +list.Count.Should().Be(4); + +list.ShouldAllBeEquivalentTo(new [] + { + "POST http://localhost:9200/_search?scroll=2m \n{\"sort\":[{\"_doc\":{\"order\":\"asc\"}}]}", + "Status: 200\n------------------------------\n", + "POST http://localhost:9200/_search?scroll=2m \n{\"sort\":[{\"_doc\":{\"order\":\"asc\"}}]}", + "Status: 200\n------------------------------\n" + }); +---- +<1> Here we use `InMemoryConnection`; in reality you would use another type of `IConnection` that actually makes a request. + +[[configuring-ssl]] +=== Configuring SSL + +SSL must be configured outside of the client using .NET's http://msdn.microsoft.com/en-us/library/system.net.servicepointmanager%28v=vs.110%29.aspx[ServicePointManager] +class and setting the http://msdn.microsoft.com/en-us/library/system.net.servicepointmanager.servercertificatevalidationcallback.aspx[ServerCertificateValidationCallback] +property. + +The bare minimum to make .NET accept self-signed SSL certs that are not in the Window's CA store would be to have the callback simply return `true`: + +[source,csharp] +---- +ServicePointManager.ServerCertificateValidationCallback += (sender, cert, chain, errors) => true; +---- + +However, this will accept **all** requests from the AppDomain to untrusted SSL sites, +therefore **we recommend doing some minimal introspection on the passed in certificate.** + +IMPORTANT: Using `ServicePointManager` does not work on **Core CLR** as the request does not go through `ServicePointManager`; please file an {github}/issues[issue] if you need support for certificate validation on Core CLR. 
+
+=== Overriding default Json.NET behavior
+
+Overriding the default Json.NET behavior in NEST is an expert behavior but if you need to get to the nitty gritty, this can be really useful.
+First, create a subclass of the `JsonNetSerializer`
+
+[source,csharp]
+----
+public class MyJsonNetSerializer : JsonNetSerializer
+{
+	public MyJsonNetSerializer(IConnectionSettingsValues settings) : base(settings) { }
+
+	public int CallToModify { get; set; } = 0;
+
+	protected override void ModifyJsonSerializerSettings(JsonSerializerSettings settings) => ++CallToModify; <1>
+
+	public int CallToContractConverter { get; set; } = 0;
+
+	protected override IList> ContractConverters => new List> <2>
+	{
+		t => {
+			CallToContractConverter++;
+			return null;
+		}
+	};
+
+}
+----
+<1> Override ModifyJsonSerializerSettings if you need access to `JsonSerializerSettings`
+
+<2> You can inject contract-resolved converters by implementing the ContractConverters property. This can be much faster than registering them on `JsonSerializerSettings.Converters`
+
+You can then register a factory on `ConnectionSettings` to create an instance of your subclass instead.
+This is **_called once per instance_** of ConnectionSettings. 
+
+[source,csharp]
+----
+var connectionPool = new SingleNodeConnectionPool(new Uri("http://localhost:9200"));
+
+var settings = new ConnectionSettings(connectionPool, new InMemoryConnection(), s => new MyJsonNetSerializer(s));
+
+var client = new ElasticClient(settings);
+
+client.RootNodeInfo();
+
+client.RootNodeInfo();
+
+var serializer = ((IConnectionSettingsValues)settings).Serializer as MyJsonNetSerializer;
+
+serializer.CallToModify.Should().BeGreaterThan(0);
+
+serializer.SerializeToString(new Project { });
+
+serializer.CallToContractConverter.Should().BeGreaterThan(0);
+----
+
diff --git a/docs/asciidoc/client-concepts/low-level/lifetimes.asciidoc b/docs/asciidoc/client-concepts/low-level/lifetimes.asciidoc
new file mode 100644
index 00000000000..6647c5208d8
--- /dev/null
+++ b/docs/asciidoc/client-concepts/low-level/lifetimes.asciidoc
@@ -0,0 +1,93 @@
+:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current
+
+:github: https://github.com/elastic/elasticsearch-net
+
+:nuget: https://www.nuget.org/packages
+
+[[lifetimes]]
+== Lifetimes
+
+If you are using an IOC container it's always useful to know the best practices around the lifetime of your objects
+
+In general we advise folks to register their ElasticClient instances as singletons. The client is thread safe
+so sharing an instance between threads is fine.
+
+Zooming in however the actual moving part that benefits the most from being static for most of the duration of your
+application is `ConnectionSettings`; caches are __per__ `ConnectionSettings`.
+
+In some applications it could make perfect sense to have multiple singleton `ElasticClient`'s registered with different
+connection settings. e.g. if you have 2 functionally isolated Elasticsearch clusters. 
+ +IMPORTANT: Due to the semantic versioning of Elasticsearch.Net and NEST and their alignment to versions of Elasticsearch, all instances of `ElasticClient` and +Elasticsearch clusters that are connected to must be on the **same major version** i.e. it is not possible to have both an `ElasticClient` to connect to +Elasticsearch 1.x _and_ 2.x in the same application as the former would require NEST 1.x and the latter, NEST 2.x. + +Let's demonstrate which components are disposed by creating our own derived `ConnectionSettings`, `IConnectionPool` and `IConnection` types + +[source,csharp] +---- +class AConnectionSettings : ConnectionSettings +{ + public AConnectionSettings(IConnectionPool pool, IConnection connection) + : base(pool, connection) + { } + public bool IsDisposed { get; private set; } + protected override void DisposeManagedResources() + { + this.IsDisposed = true; + base.DisposeManagedResources(); + } +} + +class AConnectionPool : SingleNodeConnectionPool +{ + public AConnectionPool(Uri uri, IDateTimeProvider dateTimeProvider = null) : base(uri, dateTimeProvider) { } + + public bool IsDisposed { get; private set; } + protected override void DisposeManagedResources() + { + this.IsDisposed = true; + base.DisposeManagedResources(); + } +} + +class AConnection : InMemoryConnection +{ + public bool IsDisposed { get; private set; } + protected override void DisposeManagedResources() + { + this.IsDisposed = true; + base.DisposeManagedResources(); + } +} +---- + +`ConnectionSettings`, `IConnectionPool` and `IConnection` all explictily implement `IDisposable` + +[source,csharp] +---- +var connection = new AConnection(); +var connectionPool = new AConnectionPool(new Uri("http://localhost:9200")); +var settings = new AConnectionSettings(connectionPool, connection); +settings.IsDisposed.Should().BeFalse(); +connectionPool.IsDisposed.Should().BeFalse(); +connection.IsDisposed.Should().BeFalse(); +---- + +Disposing `ConnectionSettings` will also dispose the 
`IConnectionPool` and `IConnection` it uses + +[source,csharp] +---- +var connection = new AConnection(); + +var connectionPool = new AConnectionPool(new Uri("http://localhost:9200")); + +var settings = new AConnectionSettings(connectionPool, connection); + +settings.IsDisposed.Should().BeTrue(); + +connectionPool.IsDisposed.Should().BeTrue(); + +connection.IsDisposed.Should().BeTrue(); +---- + diff --git a/docs/asciidoc/ClientConcepts/LowLevel/pipeline.png b/docs/asciidoc/client-concepts/low-level/pipeline.png similarity index 100% rename from docs/asciidoc/ClientConcepts/LowLevel/pipeline.png rename to docs/asciidoc/client-concepts/low-level/pipeline.png diff --git a/docs/asciidoc/client-concepts/low-level/post-data.asciidoc b/docs/asciidoc/client-concepts/low-level/post-data.asciidoc new file mode 100644 index 00000000000..f64dd59ae82 --- /dev/null +++ b/docs/asciidoc/client-concepts/low-level/post-data.asciidoc @@ -0,0 +1,69 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[post-data]] +== Post data + +The low level client allows you to post a `string` or `byte[]` array directly. On top of this, +if you pass a collection of `string` or `object` they will be serialized +using Elasticsearch's special bulk/multi format. + +=== Implicit Conversion + +Even though the argument for PostData on the low level client takes a `PostData`, +You can rely on implicit conversion to abstract the notion of PostData completely. 
+You can implicitly convert from the following types + +* `string` + +* `byte[]` + +* collection of `string` + +* collection of `object` + +* `object` + +[source,csharp] +---- +var fromString = ImplicitlyConvertsFrom(@string); + +var fromByteArray = ImplicitlyConvertsFrom(bytes); +var fromListOfString = ImplicitlyConvertsFrom(listOfStrings); +var fromListOfObject = ImplicitlyConvertsFrom(listOfObjects); +var fromObject = ImplicitlyConvertsFrom(@object); +---- + +PostData bytes will always be set if it originated from `byte[]` + +[source,csharp] +---- +fromByteArray.WrittenBytes.Should().BeSameAs(bytes); + +fromString.Type.Should().Be(PostType.LiteralString); +fromByteArray.Type.Should().Be(PostType.ByteArray); +fromListOfString.Type.Should().Be(PostType.EnumerableOfString); +fromListOfObject.Type.Should().Be(PostType.EnumerableOfObject); +fromObject.Type.Should().Be(PostType.Serializable); +---- + +and passing a `PostData` object to a method taking `PostData` should not wrap + +[source,csharp] +---- +fromString = ImplicitlyConvertsFrom(fromString); + +fromByteArray = ImplicitlyConvertsFrom(fromByteArray); +fromListOfString = ImplicitlyConvertsFrom(fromListOfString); +fromListOfObject = ImplicitlyConvertsFrom(fromListOfObject); +fromObject = ImplicitlyConvertsFrom(fromObject); +fromString.Type.Should().Be(PostType.LiteralString); +fromByteArray.Type.Should().Be(PostType.ByteArray); +fromListOfString.Type.Should().Be(PostType.EnumerableOfString); +fromListOfObject.Type.Should().Be(PostType.EnumerableOfObject); +fromObject.Type.Should().Be(PostType.Serializable); +---- + diff --git a/docs/asciidoc/code-standards/descriptors.asciidoc b/docs/asciidoc/code-standards/descriptors.asciidoc new file mode 100644 index 00000000000..9ac9cf21d9a --- /dev/null +++ b/docs/asciidoc/code-standards/descriptors.asciidoc @@ -0,0 +1,61 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: 
https://www.nuget.org/packages + +[[descriptors]] +== Descriptors + +Every descriptor should inherit from `DescriptorBase`, this hides object members from the fluent interface + +[source,csharp] +---- +var notDescriptors = new[] { typeof(ClusterProcessOpenFileDescriptors).Name, "DescriptorForAttribute" }; +var descriptors = from t in typeof(DescriptorBase<,>).Assembly().Types() + where t.IsClass() + && t.Name.Contains("Descriptor") + && !notDescriptors.Contains(t.Name) + && !t.GetInterfaces().Any(i => i == typeof(IDescriptor)) + select t.FullName; +descriptors.Should().BeEmpty(); +---- + +Methods taking a func should have that func return an interface + +[source,csharp] +---- +var descriptors = + from t in typeof(DescriptorBase<,>).Assembly().Types() + where t.IsClass() && typeof(IDescriptor).IsAssignableFrom(t) + select t; + +var selectorMethods = + from d in descriptors + from m in d.GetMethods() + let parameters = m.GetParameters() + from p in parameters + let type = p.ParameterType + let isGeneric = type.IsGeneric() + where isGeneric + let isFunc = type.GetGenericTypeDefinition() == typeof(Func<,>) + where isFunc + let firstFuncArg = type.GetGenericArguments().First() + let secondFuncArg = type.GetGenericArguments().Last() + let isQueryFunc = firstFuncArg.IsGeneric() && + firstFuncArg.GetGenericTypeDefinition() == typeof(QueryContainerDescriptor<>) && + typeof(QueryContainer).IsAssignableFrom(secondFuncArg) + where !isQueryFunc + let isFluentDictionaryFunc = + firstFuncArg.IsGeneric() && + firstFuncArg.GetGenericTypeDefinition() == typeof(FluentDictionary<,>) && + secondFuncArg.IsGeneric() && + secondFuncArg.GetGenericTypeDefinition() == typeof(FluentDictionary<,>) + where !isFluentDictionaryFunc + let lastArgIsNotInterface = !secondFuncArg.IsInterface() + where lastArgIsNotInterface + select $"{m.Name} on {m.DeclaringType.Name}"; + +selectorMethods.Should().BeEmpty(); +---- + diff --git a/docs/asciidoc/code-standards/elastic-client.asciidoc 
b/docs/asciidoc/code-standards/elastic-client.asciidoc new file mode 100644 index 00000000000..f2f6cd70786 --- /dev/null +++ b/docs/asciidoc/code-standards/elastic-client.asciidoc @@ -0,0 +1,158 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[elastic-client]] +== Elastic Client + +[source,csharp] +---- +var requestParametersNotNamedRequest = + from m in typeof(IElasticClient).GetMethods() + from p in m.GetParameters() + where typeof(IRequest).IsAssignableFrom(p.ParameterType) + where !p.Name.Equals("request") + select $"method '{nameof(IElasticClient)}.{m.Name}' should have parameter name of 'request' but has a name of '{p.Name}'"; + +requestParametersNotNamedRequest.Should().BeEmpty(); +---- + +[source,csharp] +---- +var requestParameters = + (from m in typeof(IElasticClient).GetMethods() + from p in m.GetParameters() + where typeof(IRequest).IsAssignableFrom(p.ParameterType) + select p).ToList(); +---- + +[source,csharp] +---- +var fluentParametersNotNamedSelector = + from m in typeof (IElasticClient).GetMethods() + from p in m.GetParameters() + where p.ParameterType.BaseType() == typeof (MulticastDelegate) + where !p.Name.Equals("selector") + select $"method '{nameof(IElasticClient)}.{m.Name}' should have parameter name of 'selector' but has a name of '{p.Name}'"; +fluentParametersNotNamedSelector.Should().BeEmpty(); +---- + +[source,csharp] +---- +foreach (var requestParameter in requestParameters) + requestParameter.HasDefaultValue.Should().BeFalse(); +---- + +[source,csharp] +---- +var concreteMethodParametersDoNotMatchInterface = new List(); + +var interfaceMap = typeof(ElasticClient).GetInterfaceMap(typeof(IElasticClient)); +---- + +[source,csharp] +---- +foreach (var interfaceMethodInfo in typeof(IElasticClient).GetMethods()) + { + var indexOfInterfaceMethod = Array.IndexOf(interfaceMap.InterfaceMethods, 
interfaceMethodInfo); + var concreteMethod = interfaceMap.TargetMethods[indexOfInterfaceMethod]; + + var concreteParameters = concreteMethod.GetParameters(); + var interfaceParameters = interfaceMethodInfo.GetParameters(); + + for (int i = 0; i < concreteParameters.Length; i++) + { + var parameterInfo = concreteParameters[i]; + var interfaceParameter = interfaceParameters[i]; + + parameterInfo.Name.Should().Be(interfaceParameter.Name); + + if (parameterInfo.HasDefaultValue != interfaceParameter.HasDefaultValue) + concreteMethodParametersDoNotMatchInterface.Add( + $"'{interfaceParameter.Name}' parameter on concrete implementation of '{nameof(ElasticClient)}.{interfaceMethodInfo.Name}' to {(interfaceParameter.HasDefaultValue ? string.Empty : "NOT")} be optional"); + } + } + +concreteMethodParametersDoNotMatchInterface.Should().BeEmpty(); +---- + +[source,csharp] +---- +var methodGroups = + from methodInfo in typeof(IElasticClient).GetMethods() + where + typeof(IResponse).IsAssignableFrom(methodInfo.ReturnType) || + (methodInfo.ReturnType.IsGeneric() + && typeof(Task<>) == methodInfo.ReturnType.GetGenericTypeDefinition() + && typeof(IResponse).IsAssignableFrom(methodInfo.ReturnType.GetGenericArguments()[0])) + let method = new MethodWithRequestParameter(methodInfo) + group method by method.Name into methodGroup + select methodGroup; +---- + +[source,csharp] +---- +foreach (var methodGroup in methodGroups) + { + foreach (var asyncMethod in methodGroup.Where(g => g.IsAsync)) + { + var parameters = asyncMethod.MethodInfo.GetParameters(); + + var syncMethod = methodGroup.First(g => + !g.IsAsync + && g.MethodType == asyncMethod.MethodType + && g.MethodInfo.GetParameters().Length == parameters.Length + && (!asyncMethod.MethodInfo.IsGenericMethod || + g.MethodInfo.GetGenericArguments().Length == asyncMethod.MethodInfo.GetGenericArguments().Length)); + + asyncMethod.Parameter.HasDefaultValue.Should().Be(syncMethod.Parameter.HasDefaultValue, + $"sync and async versions of 
{asyncMethod.MethodType} '{nameof(ElasticClient)}{methodGroup.Key}' should match"); + } + } +---- + +[source,csharp] +---- +private class MethodWithRequestParameter + { + public string Name { get; } + + public MethodInfo MethodInfo { get; } + + public bool IsAsync { get; } + + public ClientMethodType MethodType { get; } + + public ParameterInfo Parameter { get; } + + public MethodWithRequestParameter(MethodInfo methodInfo) + { + Name = methodInfo.Name.EndsWith("Async") + ? methodInfo.Name.Substring(0, methodInfo.Name.Length - "Async".Length) + : methodInfo.Name; + + IsAsync = methodInfo.ReturnType.IsGeneric() && + methodInfo.ReturnType.GetGenericTypeDefinition() == typeof(Task<>); + + MethodInfo = methodInfo; + + var parameterInfo = methodInfo.GetParameters() + .FirstOrDefault(p => typeof(IRequest).IsAssignableFrom(p.ParameterType)); + + if (parameterInfo != null) + { + Parameter = parameterInfo; + MethodType = ClientMethodType.Initializer; + } + else + { + Parameter = methodInfo.GetParameters() + .First(p => p.ParameterType.BaseType() == typeof(MulticastDelegate)); + MethodType = ClientMethodType.Fluent; + } + } + } +---- + diff --git a/docs/asciidoc/code-standards/naming-conventions.asciidoc b/docs/asciidoc/code-standards/naming-conventions.asciidoc new file mode 100644 index 00000000000..8cdb53cdd70 --- /dev/null +++ b/docs/asciidoc/code-standards/naming-conventions.asciidoc @@ -0,0 +1,134 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[naming-conventions]] +== Naming Conventions + +NEST uses the following naming conventions (with _some_ exceptions). 
+ +=== Class Names + +Abstract class names should end with a `Base` suffix + +[source,csharp] +---- +var exceptions = new[] +{ + typeof(DateMath) +}; +var abstractClassesNotEndingInBase = typeof(IRequest).Assembly().GetTypes() + .Where(t => t.IsClass() && t.IsAbstract() && !t.IsSealed() && !exceptions.Contains(t)) + .Where(t => !t.Name.Split('`')[0].EndsWith("Base")) + .Select(t => t.Name.Split('`')[0]) + .ToList(); +abstractClassesNotEndingInBase.Should().BeEmpty(); +---- + +Class names that end with `Base` suffix are abstract + +[source,csharp] +---- +var exceptions = new[] { typeof(DateMath) }; + +var baseClassesNotAbstract = typeof(IRequest).Assembly().GetTypes() + .Where(t => t.IsClass() && !exceptions.Contains(t)) + .Where(t => t.Name.Split('`')[0].EndsWith("Base")) + .Where(t => !t.IsAbstractClass()) + .Select(t => t.Name.Split('`')[0]) + .ToList(); + +baseClassesNotAbstract.Should().BeEmpty(); +---- + +=== Requests and Responses + +Request class names should end with `Request` + +[source,csharp] +---- +var types = typeof(IRequest).Assembly().GetTypes(); + +var requestsNotEndingInRequest = types + .Where(t => typeof(IRequest).IsAssignableFrom(t) && !t.IsAbstract()) + .Where(t => !typeof(IDescriptor).IsAssignableFrom(t)) + .Where(t => !t.Name.Split('`')[0].EndsWith("Request")) + .Select(t => t.Name.Split('`')[0]) + .ToList(); + +requestsNotEndingInRequest.Should().BeEmpty(); +---- + +Response class names should end with `Response` + +[source,csharp] +---- +var types = typeof(IRequest).Assembly().GetTypes(); + +var responsesNotEndingInResponse = types + .Where(t => typeof(IResponse).IsAssignableFrom(t) && !t.IsAbstract()) + .Where(t => !t.Name.Split('`')[0].EndsWith("Response")) + .Select(t => t.Name.Split('`')[0]) + .ToList(); + +responsesNotEndingInResponse.Should().BeEmpty(); +---- + +Request and Response class names should be one to one in *most* cases. +e.g. 
`ValidateRequest` => `ValidateResponse`, and not `ValidateQueryRequest` => `ValidateResponse` +There are a few exceptions to this rule, most notably the `Cat` prefixed requests and +the `Exists` requests. + +[source,csharp] +---- +var exceptions = new[] <1> +{ + typeof(DocumentExistsRequest), + typeof(DocumentExistsRequest<>), + typeof(AliasExistsRequest), + typeof(IndexExistsRequest), + typeof(TypeExistsRequest), + typeof(IndexTemplateExistsRequest), + typeof(SearchTemplateRequest), + typeof(SearchTemplateRequest<>), + typeof(ScrollRequest), + typeof(SourceRequest), + typeof(SourceRequest<>), + typeof(ValidateQueryRequest<>), + typeof(GetAliasRequest), + typeof(IndicesShardStoresRequest), + typeof(RenderSearchTemplateRequest), + //UNMAPPED + typeof(ReindexRequest), + typeof(IngestDeletePipelineRequest), + typeof(IngestGetPipelineRequest), + typeof(IngestPutPipelineRequest), + typeof(IngestSimulateRequest), + typeof(TasksCancelRequest), + typeof(TasksListRequest), + typeof(UpdateByQueryRequest) +}; + +var types = typeof(IRequest).Assembly().GetTypes(); + +var requests = new HashSet(types + .Where(t => + t.IsClass() && + !t.IsAbstract() && + typeof(IRequest).IsAssignableFrom(t) && + !typeof(IDescriptor).IsAssignableFrom(t) + && !t.Name.StartsWith("Cat") + && !exceptions.Contains(t)) + .Select(t => t.Name.Split('`')[0].Replace("Request", "")) +); + +var responses = types + .Where(t => t.IsClass() && !t.IsAbstract() && typeof(IResponse).IsAssignableFrom(t)) + .Select(t => t.Name.Split('`')[0].Replace("Response", "")); + +requests.Except(responses).Should().BeEmpty(); +---- +<1> _Exceptions to the rule_ + diff --git a/docs/asciidoc/code-standards/queries.asciidoc b/docs/asciidoc/code-standards/queries.asciidoc new file mode 100644 index 00000000000..b78f193173f --- /dev/null +++ b/docs/asciidoc/code-standards/queries.asciidoc @@ -0,0 +1,63 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: 
https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[queries]] +== Queries + +[source,csharp] +---- +var staticProperties = from p in typeof(Query<>).GetMethods() + let name = p.Name.StartsWith("GeoShape") ? "GeoShape" : p.Name + select name; + +var placeHolders = QueryPlaceHolderProperties.Select(p => p.Name.StartsWith("GeoShape") ? "GeoShape" : p.Name); + +staticProperties.Distinct().Should().Contain(placeHolders.Distinct()); +---- + +[source,csharp] +---- +var fluentMethods = from p in typeof(QueryContainerDescriptor<>).GetMethods() + let name = p.Name.StartsWith("GeoShape") ? "GeoShape" : p.Name + select name; + +var placeHolders = QueryPlaceHolderProperties.Select(p => p.Name.StartsWith("GeoShape") ? "GeoShape" : p.Name); + +fluentMethods.Distinct().Should().Contain(placeHolders.Distinct()); +---- + +[source,csharp] +---- +var skipQueryImplementations = new[] { typeof(IFieldNameQuery), typeof(IFuzzyQuery<,>), typeof(IConditionlessQuery) }; + +var queries = typeof(IQuery).Assembly().ExportedTypes + .Where(t => t.IsInterface() && typeof(IQuery).IsAssignableFrom(t)) + .Where(t => !skipQueryImplementations.Contains(t)) + .ToList(); + +queries.Should().NotBeEmpty(); + +var visitMethods = typeof(IQueryVisitor).GetMethods().Where(m => m.Name == "Visit"); + +visitMethods.Should().NotBeEmpty(); + +var missingTypes = from q in queries + let visitMethod = visitMethods.FirstOrDefault(m => m.GetParameters().First().ParameterType == q) + where visitMethod == null + select q; + +missingTypes.Should().BeEmpty(); +---- + +[source,csharp] +---- +var properties = from p in QueryProperties + let a = p.GetCustomAttributes().Concat(p.GetCustomAttributes()) + where a.Count() != 1 + select p; +properties.Should().BeEmpty(); +---- + diff --git a/docs/asciidoc/CodeStandards/Serialization/Properties.doc.asciidoc b/docs/asciidoc/code-standards/serialization/properties.asciidoc similarity index 76% rename from 
docs/asciidoc/CodeStandards/Serialization/Properties.doc.asciidoc rename to docs/asciidoc/code-standards/serialization/properties.asciidoc index 7814362db9b..1bfc7353ec6 100644 --- a/docs/asciidoc/CodeStandards/Serialization/Properties.doc.asciidoc +++ b/docs/asciidoc/code-standards/serialization/properties.asciidoc @@ -1,8 +1,15 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current -Our Json.NET contract resolver picks up attributes set on the interface +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages +[[properties]] +== Properties -[source, csharp] +Our Json.NET contract resolver picks up attributes set on the interface + +[source,csharp] ---- var pool = new SingleNodeConnectionPool(new Uri("http://localhost:9200")); var settings = new ConnectionSettings(pool, new InMemoryConnection()); @@ -14,3 +21,4 @@ serialized = c.Serializer.SerializeToString(new AnalysisDescriptor().CharFilters serialized.Should().NotContain("char_filters").And.NotContain("charFilters"); serialized.Should().Contain("char_filter"); ---- + diff --git a/docs/asciidoc/common-options.asciidoc b/docs/asciidoc/common-options.asciidoc new file mode 100644 index 00000000000..b0d00578726 --- /dev/null +++ b/docs/asciidoc/common-options.asciidoc @@ -0,0 +1,24 @@ +:output-dir: common-options + +[[common-options]] += Common Options + +[partintro] +-- +NEST has a number of types for working with Elasticsearch conventions for: + + +* <> + +* <> + +* <> + +-- + +include::{output-dir}/time-unit/time-units.asciidoc[] + +include::{output-dir}/distance-unit/distance-units.asciidoc[] + +include::{output-dir}/date-math/date-math-expressions.asciidoc[] + diff --git a/docs/asciidoc/common-options/date-math/date-math-expressions.asciidoc b/docs/asciidoc/common-options/date-math/date-math-expressions.asciidoc new file mode 100644 index 00000000000..93645822ee9 --- /dev/null +++ 
b/docs/asciidoc/common-options/date-math/date-math-expressions.asciidoc @@ -0,0 +1,133 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[date-math-expressions]] +== Date Math Expressions + +The date type supports using date math expression when using it in a query/filter +Whenever durations need to be specified, eg for a timeout parameter, the duration can be specified + +The expression starts with an "anchor" date, which can be either now or a date string (in the applicable format) ending with `||`. +It can then follow by a math expression, supporting `+`, `-` and `/` (rounding). +The units supported are + +* `y` (year) + +* `M` (month) + +* `w` (week) + +* `d` (day) + +* `h` (hour) + +* `m` (minute) + +* `s` (second) + +as a whole number representing time in milliseconds, or as a time value like `2d` for 2 days. + +:datemath: {ref_current}/common-options.html#date-math + +Be sure to read the Elasticsearch documentation on {datemath}[Date Math]. 
+ +=== Simple Expressions + +You can create simple expressions using any of the static methods on `DateMath` + +[source,csharp] +---- +Expect("now").WhenSerializing(Nest.DateMath.Now); + +Expect("2015-05-05T00:00:00").WhenSerializing(Nest.DateMath.Anchored(new DateTime(2015,05, 05))); +---- + +strings implicitly convert to `DateMath` + +[source,csharp] +---- +Expect("now").WhenSerializing("now"); +---- + +but are lenient to bad math expressions + +[source,csharp] +---- +var nonsense = "now||*asdaqwe"; +---- + +the resulting date math will assume the whole string is the anchor + +[source,csharp] +---- +Expect(nonsense).WhenSerializing(nonsense) +.Result(dateMath => ((IDateMath)dateMath) + .Anchor.Match( + d => d.Should().NotBe(default(DateTime)), + s => s.Should().Be(nonsense) + ) + ); +---- + +`DateTime` also implicitly convert to simple date math expressions + +[source,csharp] +---- +var date = new DateTime(2015, 05, 05); +---- + +the anchor will be an actual `DateTime`, even after a serialization/deserialization round trip + +[source,csharp] +---- +Expect("2015-05-05T00:00:00").WhenSerializing(date) +.Result(dateMath => ((IDateMath)dateMath) + . Anchor.Match( + d => d.Should().Be(date), + s => s.Should().BeNull() + ) + ); +---- + +=== Complex Expressions + +Ranges can be chained on to simple expressions + +[source,csharp] +---- +Expect("now+1d").WhenSerializing( + Nest.DateMath.Now.Add("1d")); +---- + +Including multiple operations + +[source,csharp] +---- +Expect("now+1d-1m").WhenSerializing( + Nest.DateMath.Now.Add("1d").Subtract(TimeSpan.FromMinutes(1))); +---- + +A rounding value can be chained to the end of the expression, after which no more ranges can be appended + +[source,csharp] +---- +Expect("now+1d-1m/d").WhenSerializing( + Nest.DateMath.Now.Add("1d") + .Subtract(TimeSpan.FromMinutes(1)) + .RoundTo(Nest.TimeUnit.Day)); +---- + +When anchoring dates, a `||` needs to be appended as clear separator between the anchor and ranges. 
+Again, multiple ranges can be chained + +[source,csharp] +---- +Expect("2015-05-05T00:00:00||+1d-1m").WhenSerializing( + Nest.DateMath.Anchored(new DateTime(2015,05,05)) + .Add("1d") + .Subtract(TimeSpan.FromMinutes(1))); +---- + diff --git a/docs/asciidoc/common-options/distance-unit/distance-units.asciidoc b/docs/asciidoc/common-options/distance-unit/distance-units.asciidoc new file mode 100644 index 00000000000..c455d46b1ae --- /dev/null +++ b/docs/asciidoc/common-options/distance-unit/distance-units.asciidoc @@ -0,0 +1,124 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[distance-units]] +== Distance Units + +Whenever distances need to be specified, e.g. for a {ref_current}/query-dsl-geo-distance-query.html[geo distance query], +the distance unit can be specified as a double number representing distance in meters, as a new instance of +a `Distance`, or as a string of the form number and distance unit e.g. "`2.72km`" + +=== Using Distance units in NEST + +NEST uses `Distance` to strongly type distance units and there are several ways to construct one. + +==== Constructor + +The most straight forward way to construct a `Distance` is through its constructor + +[source,csharp] +---- +var unitComposed = new Distance(25); +var unitComposedWithUnits = new Distance(25, Nest.DistanceUnit.Meters); +---- + +`Distance` serializes to a string composed of a factor and distance unit. +The factor is a double so always has at least one decimal place when serialized + +[source,csharp] +---- +Expect("25.0m") + .WhenSerializing(unitComposed) + .WhenSerializing(unitComposedWithUnits); +---- + +==== Implicit conversion + +Alternatively a distance unit `string` can be assigned to a `Distance`, resulting in an implicit conversion to a new `Distance` instance. 
+If no `DistanceUnit` is specified, the default distance unit is meters + +[source,csharp] +---- +Distance distanceString = "25"; + +Distance distanceStringWithUnits = "25m"; + +Expect(new Distance(25)) + .WhenSerializing(distanceString) + .WhenSerializing(distanceStringWithUnits); +---- + +==== Supported units + +A number of distance units are supported, from millimeters to nautical miles + +===== Metric + +`mm` (Millimeters) + +[source,csharp] +---- +Expect("2.0mm").WhenSerializing(new Distance(2, Nest.DistanceUnit.Millimeters)); +---- + +`cm` (Centimeters) + +[source,csharp] +---- +Expect("123.456cm").WhenSerializing(new Distance(123.456, Nest.DistanceUnit.Centimeters)); +---- + +`m` (Meters) + +[source,csharp] +---- +Expect("400.0m").WhenSerializing(new Distance(400, Nest.DistanceUnit.Meters)); +---- + +`km` (Kilometers) + +[source,csharp] +---- +Expect("0.1km").WhenSerializing(new Distance(0.1, Nest.DistanceUnit.Kilometers)); +---- + +===== Imperial + +`in` (Inches) + +[source,csharp] +---- +Expect("43.23in").WhenSerializing(new Distance(43.23, Nest.DistanceUnit.Inch)); +---- + +`ft` (Feet) + +[source,csharp] +---- +Expect("3.33ft").WhenSerializing(new Distance(3.33, Nest.DistanceUnit.Feet)); +---- + +`yd` (Yards) + +[source,csharp] +---- +Expect("9.0yd").WhenSerializing(new Distance(9, Nest.DistanceUnit.Yards)); +---- + +`mi` (Miles) + +[source,csharp] +---- +Expect("0.62mi").WhenSerializing(new Distance(0.62, Nest.DistanceUnit.Miles)); +---- + +`nmi` or `NM` (Nautical Miles) + +[source,csharp] +---- +Expect("45.5nmi").WhenSerializing(new Distance(45.5, Nest.DistanceUnit.NauticalMiles)); +---- + diff --git a/docs/asciidoc/CommonOptions/TimeUnit/TimeUnits.doc.asciidoc b/docs/asciidoc/common-options/time-unit/time-units.asciidoc similarity index 64% rename from docs/asciidoc/CommonOptions/TimeUnit/TimeUnits.doc.asciidoc rename to docs/asciidoc/common-options/time-unit/time-units.asciidoc index a68dbb3deae..37d3d63aae6 100644 --- 
a/docs/asciidoc/CommonOptions/TimeUnit/TimeUnits.doc.asciidoc +++ b/docs/asciidoc/common-options/time-unit/time-units.asciidoc @@ -1,115 +1,171 @@ -# Time units +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[time-units]] +== Time units + Whenever durations need to be specified, eg for a timeout parameter, the duration can be specified as a whole number representing time in milliseconds, or as a time value like `2d` for 2 days. -## Using Time units in NEST +=== Using Time units in NEST + NEST uses `Time` to strongly type this and there are several ways to construct one. -### Constructor -The most straight forward way to construct a `Time` is through its constructor +==== Constructor +The most straight forward way to construct a `Time` is through its constructor -[source, csharp] +[source,csharp] ---- var unitString = new Time("2d"); var unitComposed = new Time(2, Nest.TimeUnit.Day); var unitTimeSpan = new Time(TimeSpan.FromDays(2)); var unitMilliseconds = new Time(1000 * 60 * 60 * 24 * 2); ---- -When serializing Time constructed from a string, milliseconds, composition of factor and -interval, or a `TimeSpan` the expression will be serialized as time unit string -[source, csharp] +When serializing Time constructed from + +* a string + +* milliseconds (as a double) + +* composition of factor and interval + +* a `TimeSpan` + +the expression will be serialized to a time unit string composed of the factor and interval e.g. 
`2d` + +[source,csharp] ---- Expect("2d") - .WhenSerializing(unitString) - .WhenSerializing(unitComposed) - .WhenSerializing(unitTimeSpan) - .WhenSerializing(unitMilliseconds); + .WhenSerializing(unitString) + .WhenSerializing(unitComposed) + .WhenSerializing(unitTimeSpan) + .WhenSerializing(unitMilliseconds); ---- -Milliseconds are always calculated even when not using the constructor that takes a long -[source, csharp] +The `Milliseconds` property on `Time` is calculated even when not using the constructor that takes a double + +[source,csharp] ---- unitMilliseconds.Milliseconds.Should().Be(1000*60*60*24*2); ----- -[source, csharp] ----- + unitComposed.Milliseconds.Should().Be(1000*60*60*24*2); unitTimeSpan.Milliseconds.Should().Be(1000*60*60*24*2); unitString.Milliseconds.Should().Be(1000*60*60*24*2); ---- -### Implicit conversion -Alternatively `string`, `TimeSpan` and `double` can be implicitly assigned to `Time` properties and variables +==== Implicit conversion +Alternatively to using the constructor, `string`, `TimeSpan` and `double` can be implicitly converted to `Time` -[source, csharp] +[source,csharp] ---- Time oneAndHalfYear = "1.5y"; + Time twoWeeks = TimeSpan.FromDays(14); + Time twoDays = 1000*60*60*24*2; + Expect("1.5y").WhenSerializing(oneAndHalfYear); + Expect("2w").WhenSerializing(twoWeeks); + Expect("2d").WhenSerializing(twoDays); +---- + +[source,csharp] +---- Time oneAndHalfYear = "1.5y"; + Time twoWeeks = TimeSpan.FromDays(14); + Time twoDays = 1000*60*60*24*2; ---- -Milliseconds are calculated even when values are not passed as long -[source, csharp] +Milliseconds are calculated even when values are not passed as long... + +[source,csharp] ---- -oneAndHalfYear.Milliseconds.Should().BeGreaterThan(1); +twoWeeks.Milliseconds.Should().BeGreaterThan(1); ---- -[source, csharp] + +...**except** when dealing with years or months, whose millsecond value cannot +be calculated *accurately*, since they are not fixed durations. 
For instance, +30 vs 31 vs 28 days in a month, or 366 vs 365 days in a year. +In this instance, Milliseconds will be -1. + +[source,csharp] ---- -twoWeeks.Milliseconds.Should().BeGreaterThan(1); +oneAndHalfYear.Milliseconds.Should().Be(-1); ---- + This allows you to do comparisons on the expressions -[source, csharp] +[source,csharp] ---- oneAndHalfYear.Should().BeGreaterThan(twoWeeks); ----- -[source, csharp] ----- + (oneAndHalfYear > twoWeeks).Should().BeTrue(); + (oneAndHalfYear >= twoWeeks).Should().BeTrue(); + (twoDays >= new Time("2d")).Should().BeTrue(); + twoDays.Should().BeLessThan(twoWeeks); + (twoDays < twoWeeks).Should().BeTrue(); + (twoDays <= twoWeeks).Should().BeTrue(); + (twoDays <= new Time("2d")).Should().BeTrue(); ---- + And assert equality -[source, csharp] +[source,csharp] ---- twoDays.Should().Be(new Time("2d")); ----- -[source, csharp] ----- + (twoDays == new Time("2d")).Should().BeTrue(); + (twoDays != new Time("2.1d")).Should().BeTrue(); + (new Time("2.1d") == new Time(TimeSpan.FromDays(2.1))).Should().BeTrue(); + +(new Time("1") == new Time(1)).Should().BeTrue(); + +(new Time("-1") == new Time(-1)).Should().BeTrue(); ---- -Time units are specified as a union of either a `DateInterval` or `Time` + +=== Units of Time + +Units of `Time` are specified as a union of either a `DateInterval` or `Time`, both of which implicitly convert to the `Union` of these two. 
-[source, csharp] +[source,csharp] ---- Expect("month").WhenSerializing>(DateInterval.Month); ----- -[source, csharp] ----- + Expect("day").WhenSerializing>(DateInterval.Day); + Expect("hour").WhenSerializing>(DateInterval.Hour); + Expect("minute").WhenSerializing>(DateInterval.Minute); + Expect("quarter").WhenSerializing>(DateInterval.Quarter); + Expect("second").WhenSerializing>(DateInterval.Second); + Expect("week").WhenSerializing>(DateInterval.Week); + Expect("year").WhenSerializing>(DateInterval.Year); + Expect("2d").WhenSerializing>((Time)"2d"); + Expect("1.16w").WhenSerializing>((Time)TimeSpan.FromDays(8.1)); ---- + diff --git a/docs/asciidoc/connection-pooling.asciidoc b/docs/asciidoc/connection-pooling.asciidoc new file mode 100644 index 00000000000..58bc6ae0e48 --- /dev/null +++ b/docs/asciidoc/connection-pooling.asciidoc @@ -0,0 +1,64 @@ +:output-dir: client-concepts/connection-pooling + +:building-blocks: {output-dir}/building-blocks + +:sniffing: {output-dir}/sniffing + +:pinging: {output-dir}/pinging + +:round-robin: {output-dir}/round-robin + +:failover: {output-dir}/failover + +:max-retries: {output-dir}/max-retries + +:request-overrides: {output-dir}/request-overrides + +:exceptions: {output-dir}/exceptions + +include::{building-blocks}/connection-pooling.asciidoc[] + +include::{building-blocks}/request-pipelines.asciidoc[] + +include::{building-blocks}/transports.asciidoc[] + +include::{building-blocks}/keeping-track-of-nodes.asciidoc[] + +include::{building-blocks}/date-time-providers.asciidoc[] + +include::{sniffing}/on-startup.asciidoc[] + +include::{sniffing}/on-connection-failure.asciidoc[] + +include::{sniffing}/on-stale-cluster-state.asciidoc[] + +include::{sniffing}/role-detection.asciidoc[] + +include::{pinging}/first-usage.asciidoc[] + +include::{pinging}/revival.asciidoc[] + +include::{round-robin}/round-robin.asciidoc[] + +include::{round-robin}/skip-dead-nodes.asciidoc[] + +include::{round-robin}/volatile-updates.asciidoc[] + 
+include::{failover}/falling-over.asciidoc[] + +include::{max-retries}/respects-max-retry.asciidoc[] + +include::{request-overrides}/disable-sniff-ping-per-request.asciidoc[] + +include::{request-overrides}/request-timeouts-overrides.asciidoc[] + +include::{request-overrides}/respects-max-retry-overrides.asciidoc[] + +include::{request-overrides}/respects-allowed-status-code.asciidoc[] + +include::{request-overrides}/respects-force-node.asciidoc[] + +include::{exceptions}/unexpected-exceptions.asciidoc[] + +include::{exceptions}/unrecoverable-exceptions.asciidoc[] + diff --git a/docs/asciidoc/hadouken-indentation.jpg b/docs/asciidoc/hadouken-indentation.jpg new file mode 100644 index 00000000000..afe03b960d1 Binary files /dev/null and b/docs/asciidoc/hadouken-indentation.jpg differ diff --git a/docs/asciidoc/high-level.asciidoc b/docs/asciidoc/high-level.asciidoc new file mode 100644 index 00000000000..ff18811690f --- /dev/null +++ b/docs/asciidoc/high-level.asciidoc @@ -0,0 +1,66 @@ +:output-dir: client-concepts/high-level + +[[nest]] += Client Concepts - NEST + +[partintro] +-- +The high level client, `ElasticClient`, provides a strongly typed query DSL that maps one-to-one with the Elasticsearch query DSL. + +It can be installed from the Package Manager Console inside Visual Studio using + + +[source,shell] +---- +Install-Package NEST +---- + + +Or by searching for https://www.nuget.org/packages/NEST[NEST] in the Package Manager GUI. + +NEST internally uses and still exposes the low level client, `ElasticLowLevelClient`, from <> via +the `.LowLevel` property on `ElasticClient`. 
+ +There are a number of conventions that NEST uses for inference of + + +* <> + +* <> + +* <> and <> + +* <> + +* <> + +* <> + + +In addition to features such as + + +* <> + +* <> + +-- + +include::{output-dir}/inference/index-name-inference.asciidoc[] + +include::{output-dir}/inference/indices-paths.asciidoc[] + +include::{output-dir}/inference/field-inference.asciidoc[] + +include::{output-dir}/inference/property-inference.asciidoc[] + +include::{output-dir}/inference/ids-inference.asciidoc[] + +include::{output-dir}/inference/document-paths.asciidoc[] + +include::{output-dir}/inference/features-inference.asciidoc[] + +include::{output-dir}/mapping/auto-map.asciidoc[] + +include::{output-dir}/covariant-hits/covariant-search-results.asciidoc[] + diff --git a/docs/asciidoc/index.asciidoc b/docs/asciidoc/index.asciidoc index 873adf55c98..6c37a9de3e2 100644 --- a/docs/asciidoc/index.asciidoc +++ b/docs/asciidoc/index.asciidoc @@ -1,53 +1,15 @@ -# Introduction +[[elasticsearch-net-reference]] += Elasticsearch.Net and NEST: the .NET clients -You've reached the documentation page for `Elasticsearch.Net` and `NEST`. The two official .NET clients for Elasticsearch. So why two clients I hear you say? +include::intro.asciidoc[] -`Elasticsearch.Net` is a very low level, dependency free, client that has no opinions about how you build and represent your requests and responses. It has abstracted -enough so that **all** the Elasticsearch API endpoints are represented as methods but not too much to get in the way of how you want to build your json/request/response objects. It also comes with builtin, configurable/overridable, cluster failover retry mechanisms. Elasticsearch is elastic so why not your client? 
+include::client-concepts.asciidoc[] -`NEST` is a high level client that has the advantage of having mapped all the request and response objects, comes with a strongly typed query DSL that maps 1 to 1 with the Elasticsearch query DSL, and takes advantage of specific .NET features such as covariant results. NEST internally uses, and still exposes, the low level `Elasticsearch.Net` client. +include::common-options.asciidoc[] -Please read the getting started guide for both. +include::search.asciidoc[] +include::query-dsl.asciidoc[] -## Who's using Nest -* [stackoverflow.com](http://www.stackoverflow.com) (and the rest of the stackexchange family). -* [7digital.com](http://www.7digital.com) (run NEST on mono). -* [rijksmuseum.nl](https://www.rijksmuseum.nl/en) (Elasticsearch is the only datastorage hit for each page). -* [Kiln](http://www.fogcreek.com/kiln/) FogCreek's version control & code review tooling. - They are so pleased with Elasticsearch that [they made a video about how pleased they are!](http://blog.fogcreek.com/kiln-powered-by-elasticsearch/) - -## Other resources - -[@joelabrahamsson](http://twitter.com/joelabrahamsson) wrote a great [intro into elasticsearch on .NET](http://joelabrahamsson.com/entry/extending-aspnet-mvc-music-store-with-elasticsearch) -using NEST. - -Also checkout the [searchbox.io guys](https://searchbox.io/) rocking NEST [on AppHarbor](http://blog.appharbor.com/2012/06/19/searchbox-elasticsearch-is-now-an-add-on) -with their [demo project](https://github.com/searchbox-io/.net-sample) - -## Questions, bugs, comments, requests - -All of these are more then welcome on the github issues pages! We try to to at least reply within the same day. - -We also monitor question tagged with ['nest' on stackoverflow](http://stackoverflow.com/questions/tagged/nest) or -['elasticsearch-net' on stackoverflow](http://stackoverflow.com/questions/tagged/elasticsearch-net) - -# License - -This software is licensed under the Apache 2 license, quoted below. 
- - Copyright (c) 2014 Elasticsearch - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - +include::aggregations.asciidoc[] diff --git a/docs/asciidoc/intro.asciidoc b/docs/asciidoc/intro.asciidoc new file mode 100644 index 00000000000..77fdcb15bb5 --- /dev/null +++ b/docs/asciidoc/intro.asciidoc @@ -0,0 +1,28 @@ +:github: https://github.com/elastic/elasticsearch-net + +:stackoverflow: http://stackoverflow.com + +[[introduction]] +== Introduction + +You've reached the documentation page for `Elasticsearch.Net` and `NEST`, The two official .NET clients for Elasticsearch. + +=== Why two clients? + +`Elasticsearch.Net` is a very low level, dependency free, client that has no opinions about how you build and represent your requests and responses. It has abstracted +enough so that **all** the Elasticsearch API endpoints are represented as methods but not too much to get in the way of how you want to build +your json/request/response objects. It also comes with built in, configurable/overridable, cluster failover retry mechanisms. Elasticsearch is _elastic_ so why not your client? + +`NEST` is a high level client that has the advantage of having mapped all the request and response objects, +comes with a strongly typed query DSL that maps 1 to 1 with the Elasticsearch query DSL, and takes advantage of specific .NET features such as +<> and <>. NEST internally uses and still exposes the low level `Elasticsearch.Net` client. + +Please read the getting started guide for both <> and <>. 
+ +=== Questions, bugs, comments, requests + +All of these are more than welcome on the {github}/issues[github issues pages]! We try to at least reply within the same day. + +We also monitor questions tagged with {stackoverflow}/questions/tagged/nest['nest' on stackoverflow] or +{stackoverflow}/questions/tagged/elasticsearch-net['elasticsearch-net' on stackoverflow], as well as https://discuss.elastic.co[discussions on our discourse site] + diff --git a/docs/asciidoc/low-level.asciidoc b/docs/asciidoc/low-level.asciidoc new file mode 100644 index 00000000000..4abee9ec908 --- /dev/null +++ b/docs/asciidoc/low-level.asciidoc @@ -0,0 +1,31 @@ +:output-dir: client-concepts/low-level + +[[elasticsearch-net]] += Client Concepts - Elasticsearch.Net + +[partintro] +-- +The low level client, `ElasticLowLevelClient`, is a low level, dependency free client that has no +opinions about how you build and represent your requests and responses. + +It can be installed from the Package Manager Console inside Visual Studio using + + +[source,shell] +---- +Install-Package Elasticsearch.Net +---- + + +Or by searching for https://www.nuget.org/packages/Elasticsearch.Net[Elasticsearch.Net] in the Package Manager GUI. 
+ +-- + +include::{output-dir}/connecting.asciidoc[] + +include::{output-dir}/lifetimes.asciidoc[] + +include::{output-dir}/post-data.asciidoc[] + +include::connection-pooling.asciidoc[] + diff --git a/docs/asciidoc/pipeline.png b/docs/asciidoc/pipeline.png new file mode 100644 index 00000000000..b15d2f0f8b6 Binary files /dev/null and b/docs/asciidoc/pipeline.png differ diff --git a/docs/asciidoc/query-dsl-usage.asciidoc b/docs/asciidoc/query-dsl-usage.asciidoc new file mode 100644 index 00000000000..14bc1650a65 --- /dev/null +++ b/docs/asciidoc/query-dsl-usage.asciidoc @@ -0,0 +1,134 @@ +:includes-from-dirs: query-dsl/compound,query-dsl/geo,query-dsl/joining,query-dsl/nest-specific,query-dsl/span,query-dsl/specialized,query-dsl/term-level + +include::../../docs/asciidoc/query-dsl/compound/and/and-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/bool/bool-dsl-complex-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/bool/bool-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/boosting/boosting-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/constant-score/constant-score-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/dismax/dismax-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/filtered/filtered-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/full-text/common-terms/common-terms-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/full-text/match/match-phrase-prefix-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/full-text/match/match-phrase-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/full-text/match/match-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/full-text/multi-match/multi-match-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/full-text/query-string/query-string-usage.asciidoc[] + 
+include::../../docs/asciidoc/query-dsl/compound/full-text/simple-query-string/simple-query-string-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/function-score/function-score-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/indices/indices-no-match-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/indices/indices-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/limit/limit-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/not/not-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/compound/or/or-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/bounding-box/geo-bounding-box-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/distance/geo-distance-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/distance-range/geo-distance-range-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/hash-cell/geo-hash-cell-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/polygon/geo-polygon-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/shape/circle/geo-shape-circle-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/shape/envelope/geo-envelope-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/shape/indexed-shape/geo-indexed-shape-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/shape/line-string/geo-line-string-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/shape/multi-line-string/geo-multi-line-string-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/shape/multi-point/geo-multi-point-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/shape/point/geo-point-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/geo/shape/polygon/geo-polygon-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/joining/has-child/has-child-query-usage.asciidoc[] + 
+include::../../docs/asciidoc/query-dsl/joining/has-parent/has-parent-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/joining/nested/nested-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/nest-specific/raw/raw-combine-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/nest-specific/raw/raw-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/span/container/span-containing-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/span/first/span-first-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/span/multi-term/span-multi-term-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/span/near/span-near-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/span/not/span-not-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/span/or/span-or-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/span/term/span-term-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/span/within/span-within-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/specialized/more-like-this/more-like-this-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/specialized/script/script-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/specialized/template/template-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/exists/exists-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-date-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-numeric-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/ids/ids-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/missing/missing-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/prefix/prefix-query-usage.asciidoc[] + 
+include::../../docs/asciidoc/query-dsl/term-level/range/date-range-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/range/numeric-range-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/range/term-range-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/regexp/regexp-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/term/term-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/terms/terms-list-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/terms/terms-lookup-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/terms/terms-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/type/type-query-usage.asciidoc[] + +include::../../docs/asciidoc/query-dsl/term-level/wildcard/wildcard-query-usage.asciidoc[] + diff --git a/docs/asciidoc/query-dsl.asciidoc b/docs/asciidoc/query-dsl.asciidoc new file mode 100644 index 00000000000..c5bfb0d7965 --- /dev/null +++ b/docs/asciidoc/query-dsl.asciidoc @@ -0,0 +1,147 @@ +:output-dir: query-dsl + +[[query-dsl]] += Query DSL + +[partintro] +-- +NEST exposes all of the query DSL endpoints available in Elasticsearch + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +-- + +include::{output-dir}/bool-dsl/bool-dsl.asciidoc[] + +include::query-dsl-usage.asciidoc[] + diff --git a/docs/asciidoc/query-dsl/bool-dsl/bool-dsl.asciidoc 
b/docs/asciidoc/query-dsl/bool-dsl/bool-dsl.asciidoc new file mode 100644 index 00000000000..5025932e39a --- /dev/null +++ b/docs/asciidoc/query-dsl/bool-dsl/bool-dsl.asciidoc @@ -0,0 +1,324 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[bool-queries]] +== Bool Queries + +Writing boolean queries can grow verbose rather quickly when using the query DSL. For example, +take a single {ref_current}/query-dsl-bool-query.html[bool query] with only two clauses + +[source,csharp] +---- +var searchResults = this.Client.Search(s => s + .Query(q => q + .Bool(b => b + .Should( + bs => bs.Term(p => p.Name, "x"), + bs => bs.Term(p => p.Name, "y") + ) + ) + ) +); +---- + +Now, imagine multiple nested bools; you'll realise that this quickly becomes an exercise in _hadouken indenting_ + +[[indent]] +.hadouken indenting +image::hadouken-indentation.jpg[hadouken indenting] + +=== Operator Overloading + +For this reason, NEST introduces **operator overloading** so complex bool queries become easier to write. +The previous example now becomes the following with the fluent API + +[source,csharp] +---- +var searchResults = this.Client.Search(s => s + .Query(q => q.Term(p => p.Name, "x") || q.Term(p => p.Name, "y")) +); +---- + +or, using the object initializer syntax + +[source,csharp] +---- +searchResults = this.Client.Search(new SearchRequest +{ + Query = new TermQuery { Field = "name", Value= "x" } + || new TermQuery { Field = Field(p=>p.Name), Value = "y" } +}); +---- + +A naive implementation of operator overloading would rewrite + +`term && term && term` to + +.... +bool +|___must + |___term + |___bool + |___must + |___term + |___term +.... + +As you can image this becomes unwieldy quite fast the more complex a query becomes NEST can spot these and +join them together to become a single bool query + +.... 
+bool +|___must + |___term + |___term + |___term +.... + +[source,csharp] +---- +Assert( + q => q.Query() && q.Query() && q.Query(), + Query && Query && Query, + c => c.Bool.Must.Should().HaveCount(3) + ); +---- + +The bool DSL offers also a short hand notation to mark a query as a `must_not` using the `!` operator + +[source,csharp] +---- +Assert(q => !q.Query(), !Query, c => c.Bool.MustNot.Should().HaveCount(1)); +---- + +And to mark a query as a `filter` using the `+` operator + +[source,csharp] +---- +Assert(q => +q.Query(), +Query, c => c.Bool.Filter.Should().HaveCount(1)); +---- + +Both of these can be combined with `&&` to form a single bool query + +[source,csharp] +---- +Assert(q => !q.Query() && !q.Query(), !Query && !Query, c => c.Bool.MustNot.Should().HaveCount(2)); +---- + +[source,csharp] +---- +Assert(q => +q.Query() && +q.Query(), +Query && +Query, c => c.Bool.Filter.Should().HaveCount(2)); +---- + +=== Combining/Merging bool queries + +When combining multiple queries some or all possibly marked as `must_not` or `filter`, NEST still combines to a single bool query + +.... +bool +|___must +| |___term +| |___term +| |___term +| +|___must_not + |___term +.... + +[source,csharp] +---- +Assert( + q => q.Query() && q.Query() && q.Query() && !q.Query(), + Query && Query && Query && !Query, + c=> + { + c.Bool.Must.Should().HaveCount(3); + c.Bool.MustNot.Should().HaveCount(1); + }); + +c.Bool.Must.Should().HaveCount(3); + +c.Bool.MustNot.Should().HaveCount(1); +---- + +Even more involved `term && term && term && !term && +term && +term` still only results in a single `bool` query: + +.... +bool +|___must +| |___term +| |___term +| |___term +| +|___must_not +| |___term +| +|___filter + |___term + |___term +.... 
+ +[source,csharp] +---- +Assert( + q => q.Query() && q.Query() && q.Query() && !q.Query() && +q.Query() && +q.Query(), + Query && Query && Query && !Query && +Query && +Query, + c => + { + c.Bool.Must.Should().HaveCount(3); + c.Bool.MustNot.Should().HaveCount(1); + c.Bool.Filter.Should().HaveCount(2); + }); + +c.Bool.Must.Should().HaveCount(3); + +c.Bool.MustNot.Should().HaveCount(1); + +c.Bool.Filter.Should().HaveCount(2); +---- + +You can still mix and match actual bool queries with the bool DSL e.g `bool(must=term, term, term) && !term` would still merge into a single `bool` query. + +[source,csharp] +---- +Assert( + q => q.Bool(b => b.Must(mq => mq.Query(), mq => mq.Query(), mq => mq.Query())) && !q.Query(), + new BoolQuery { Must = new QueryContainer[] { Query, Query, Query } } && !Query, + c => + { + c.Bool.Must.Should().HaveCount(3); + c.Bool.MustNot.Should().HaveCount(1); + }); + +c.Bool.Must.Should().HaveCount(3); + +c.Bool.MustNot.Should().HaveCount(1); +---- + +[source,csharp] +---- +Assert( + q => q.Query() && (q.Query() || q.Query() || q.Query()), + Query && (Query || Query || Query), + c => + { + c.Bool.Must.Should().HaveCount(2); + var lastClause = c.Bool.Must.Last() as IQueryContainer; + lastClause.Should().NotBeNull(); + lastClause.Bool.Should().NotBeNull(); + lastClause.Bool.Should.Should().HaveCount(3); + }); + +c.Bool.Must.Should().HaveCount(2); + +var lastClause = c.Bool.Must.Last() as IQueryContainer; + +lastClause.Should().NotBeNull(); + +lastClause.Bool.Should().NotBeNull(); + +lastClause.Bool.Should.Should().HaveCount(3); +---- + +TIP: *add parentheses to force evaluation order* + +Also note that using shoulds as boosting factors can be really powerful so if you need this +always remember that you can mix and match an actual bool query with the bool dsl. + +There is another subtle situation where NEST will not blindly merge 2 bool queries with only should clauses. 
Imagine the following: + +`bool(should=term1, term2, term3, term4, minimum_should_match=2) || term5 || term6` + +if NEST identified both sides of the OR operation as only containing `should` clauses and it would +join them together it would give a different meaning to the `minimum_should_match` parameter of the first boolean query. +Rewriting this to a single bool with 5 `should` clauses would break because only matching on `term5` or `term6` should still be a hit. + +[source,csharp] +---- +Assert( + q => q.Bool(b => b + .Should(mq => mq.Query(), mq => mq.Query(), mq => mq.Query(), mq => mq.Query()) + .MinimumShouldMatch(2) + ) + || !q.Query() || q.Query(), + new BoolQuery + { + Should = new QueryContainer[] { Query, Query, Query, Query }, + MinimumShouldMatch = 2 + } || !Query || Query, + c => + { + c.Bool.Should.Should().HaveCount(3); + var nestedBool = c.Bool.Should.First() as IQueryContainer; + nestedBool.Bool.Should.Should().HaveCount(4); + }); + +c.Bool.Should.Should().HaveCount(3); + +var nestedBool = c.Bool.Should.First() as IQueryContainer; + +nestedBool.Bool.Should.Should().HaveCount(4); +---- + +=== Locked bool queries + +NEST will not combine `bool` queries if any of the query metadata is set e.g if metadata such as `boost` or `name` are set, +NEST will treat these as locked + +Here we demonstrate that two locked `bool` queries are not combined + +[source,csharp] +---- +Assert( + q => q.Bool(b => b.Name("leftBool").Should(mq => mq.Query())) + || q.Bool(b => b.Name("rightBool").Should(mq => mq.Query())), + new BoolQuery { Name = "leftBool", Should = new QueryContainer[] { Query } } + || new BoolQuery { Name = "rightBool", Should = new QueryContainer[] { Query } }, + c => AssertDoesNotJoinOntoLockedBool(c, "leftBool")); +---- + +neither are two `bool` queries where either right query is locked + +[source,csharp] +---- +Assert( + q => q.Bool(b => b.Should(mq => mq.Query())) + || q.Bool(b => b.Name("rightBool").Should(mq => mq.Query())), + new BoolQuery { 
Should = new QueryContainer[] { Query } } + || new BoolQuery { Name = "rightBool", Should = new QueryContainer[] { Query } }, + c => AssertDoesNotJoinOntoLockedBool(c, "rightBool")); +---- + +or the left query is locked + +[source,csharp] +---- +Assert( + q => q.Bool(b => b.Name("leftBool").Should(mq => mq.Query())) + || q.Bool(b => b.Should(mq => mq.Query())), + new BoolQuery { Name = "leftBool", Should = new QueryContainer[] { Query } } + || new BoolQuery { Should = new QueryContainer[] { Query } }, + c => AssertDoesNotJoinOntoLockedBool(c, "leftBool")); +---- + +[source,csharp] +---- +c.Bool.Should.Should().HaveCount(2); + +var nestedBool = c.Bool.Should.Cast().First(b=>!string.IsNullOrEmpty(b.Bool?.Name)); + +nestedBool.Bool.Should.Should().HaveCount(1); + +nestedBool.Bool.Name.Should().Be(firstName); +---- + +[source,csharp] +---- +assert(fluent.InvokeQuery(new QueryContainerDescriptor())); + +assert((QueryContainer)ois); +---- + diff --git a/docs/asciidoc/query-dsl/bool-dsl/hadouken-indentation.jpg b/docs/asciidoc/query-dsl/bool-dsl/hadouken-indentation.jpg new file mode 100644 index 00000000000..afe03b960d1 Binary files /dev/null and b/docs/asciidoc/query-dsl/bool-dsl/hadouken-indentation.jpg differ diff --git a/docs/asciidoc/query-dsl/bool-dsl/operators/and-operator-usage.asciidoc b/docs/asciidoc/query-dsl/bool-dsl/operators/and-operator-usage.asciidoc new file mode 100644 index 00000000000..dc4a20e635f --- /dev/null +++ b/docs/asciidoc/query-dsl/bool-dsl/operators/and-operator-usage.asciidoc @@ -0,0 +1,104 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[and-operator-usage]] +== And Operator Usage + +[source,csharp] +---- +var lotsOfAnds = Enumerable.Range(0, 100).Aggregate(new QueryContainer(), (q, c) => q && Query, q => q); + +LotsOfAnds(lotsOfAnds); +---- + +[source,csharp] +---- +QueryContainer container = null; +---- 
+ +[source,csharp] +---- +ReturnsBool(Query && Query, q => q.Query() && q.Query(), b => +{ + b.Must.Should().NotBeEmpty().And.HaveCount(2); + b.Should.Should().BeNull(); + b.MustNot.Should().BeNull(); + b.Filter.Should().BeNull(); +}); +b.Must.Should().NotBeEmpty().And.HaveCount(2); +b.Should.Should().BeNull(); +b.MustNot.Should().BeNull(); +b.Filter.Should().BeNull(); +ReturnsBool(Query && Query && ConditionlessQuery, q => q.Query() && q.Query() && q.ConditionlessQuery(), b => +{ + b.Must.Should().NotBeEmpty().And.HaveCount(2); + b.Should.Should().BeNull(); + b.MustNot.Should().BeNull(); + b.Filter.Should().BeNull(); +}); +b.Must.Should().NotBeEmpty().And.HaveCount(2); +b.Should.Should().BeNull(); +b.MustNot.Should().BeNull(); +b.Filter.Should().BeNull(); +ReturnsSingleQuery(Query && ConditionlessQuery, q => q.Query() && q.ConditionlessQuery(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery(ConditionlessQuery && Query, q => q.ConditionlessQuery() && q.Query(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery(Query && NullQuery, q => q.Query() && q.NullQuery(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery(NullQuery && Query, q=> q.NullQuery() && q.Query(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery(ConditionlessQuery && ConditionlessQuery && ConditionlessQuery && Query, + q => q.ConditionlessQuery() && q.ConditionlessQuery() && q.ConditionlessQuery() && q.Query(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery( + NullQuery && NullQuery && ConditionlessQuery && Query, + q=>q.NullQuery() && q.NullQuery() && q.ConditionlessQuery() && q.Query(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsNull(NullQuery && ConditionlessQuery, q=> q.NullQuery() && q.ConditionlessQuery()); +ReturnsNull(ConditionlessQuery && NullQuery, q=>q.ConditionlessQuery() && q.NullQuery()); +ReturnsNull(ConditionlessQuery && ConditionlessQuery, q=>q.ConditionlessQuery() && q.ConditionlessQuery()); 
+ReturnsNull( + ConditionlessQuery && ConditionlessQuery && ConditionlessQuery && ConditionlessQuery, + q=>q.ConditionlessQuery() && q.ConditionlessQuery() && q.ConditionlessQuery() && q.ConditionlessQuery() + +); +ReturnsNull( + NullQuery && ConditionlessQuery && ConditionlessQuery && ConditionlessQuery, + q=>q.NullQuery() && q.ConditionlessQuery() && q.ConditionlessQuery() && q.ConditionlessQuery() +); +---- + +[source,csharp] +---- +foreach(var i in Enumerable.Range(0, 100)) + container &= Query; + +LotsOfAnds(container); +---- + +[source,csharp] +---- +var container = new QueryContainer(); +---- + +[source,csharp] +---- +foreach(var i in Enumerable.Range(0, 100)) + container &= Query; + +LotsOfAnds(container); +---- + +[source,csharp] +---- +lotsOfAnds.Should().NotBeNull(); + +lotsOfAnds.Bool.Should().NotBeNull(); + +lotsOfAnds.Bool.Must.Should().NotBeEmpty().And.HaveCount(100); +---- + diff --git a/docs/asciidoc/query-dsl/bool-dsl/operators/not-operator-usage.asciidoc b/docs/asciidoc/query-dsl/bool-dsl/operators/not-operator-usage.asciidoc new file mode 100644 index 00000000000..7e17d8abcfc --- /dev/null +++ b/docs/asciidoc/query-dsl/bool-dsl/operators/not-operator-usage.asciidoc @@ -0,0 +1,119 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[not-operator-usage]] +== Not Operator Usage + +[source,csharp] +---- +ReturnsBool(!Query && !Query, q => !q.Query() && !q.Query(), b => +{ + b.MustNot.Should().NotBeEmpty().And.HaveCount(2); + b.Must.Should().BeNull(); + b.Should.Should().BeNull(); + b.Filter.Should().BeNull(); +}); +b.MustNot.Should().NotBeEmpty().And.HaveCount(2); +b.Must.Should().BeNull(); +b.Should.Should().BeNull(); +b.Filter.Should().BeNull(); +ReturnsBool(!Query || !Query || !ConditionlessQuery, q => !q.Query() || !q.Query() || !q.ConditionlessQuery(), b => +{ + b.Should.Should().NotBeEmpty().And.HaveCount(2); + 
b.Must.Should().BeNull(); + b.MustNot.Should().BeNull(); + b.Filter.Should().BeNull(); + foreach (IQueryContainer q in b.Should) + { + q.Bool.Should().NotBeNull(); + q.Bool.MustNot.Should().NotBeEmpty().And.HaveCount(1); + } +}); +b.Should.Should().NotBeEmpty().And.HaveCount(2); +b.Must.Should().BeNull(); +b.MustNot.Should().BeNull(); +b.Filter.Should().BeNull(); + +foreach (IQueryContainer q in b.Should) +{ + q.Bool.Should().NotBeNull(); + q.Bool.MustNot.Should().NotBeEmpty().And.HaveCount(1); +} +---- + +[source,csharp] +---- +var lotsOfNots = Enumerable.Range(0, 100).Aggregate(new QueryContainer(), (q, c) => q || Query, q => q); + +LotsOfNots(lotsOfNots); +---- + +[source,csharp] +---- +QueryContainer container = null; +---- + +[source,csharp] +---- +ReturnsSingleQuery(!Query || !ConditionlessQuery, q => !q.Query() || !q.ConditionlessQuery(), + c => c.Bool.MustNot.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery(!ConditionlessQuery || !Query, q => !q.ConditionlessQuery() || !q.Query(), + c => c.Bool.MustNot.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery(!Query || !NullQuery, q => !q.Query() || !q.NullQuery(), + c => c.Bool.MustNot.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery(!NullQuery && !Query, q => !q.NullQuery() && !q.Query(), + c => c.Bool.MustNot.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery(!ConditionlessQuery || !ConditionlessQuery && !ConditionlessQuery || !Query, + q => !q.ConditionlessQuery() || !q.ConditionlessQuery() && !q.ConditionlessQuery() || !q.Query(), + c => c.Bool.MustNot.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery( + !NullQuery || !NullQuery || !ConditionlessQuery || !Query, + q => !q.NullQuery() || !q.NullQuery() || !q.ConditionlessQuery() || !q.Query(), + c => c.Bool.MustNot.Should().NotBeNull()); +ReturnsNull(!NullQuery || !ConditionlessQuery, q => !q.NullQuery() || !q.ConditionlessQuery()); +ReturnsNull(!ConditionlessQuery && !NullQuery, q => !q.ConditionlessQuery() && 
!q.NullQuery()); +ReturnsNull(!ConditionlessQuery || !ConditionlessQuery, q => !q.ConditionlessQuery() || !q.ConditionlessQuery()); +ReturnsNull( + !ConditionlessQuery || !ConditionlessQuery || !ConditionlessQuery || !ConditionlessQuery, + q => !q.ConditionlessQuery() || !q.ConditionlessQuery() || !q.ConditionlessQuery() || !q.ConditionlessQuery() + +); +ReturnsNull( + !NullQuery || !ConditionlessQuery || !ConditionlessQuery || !ConditionlessQuery, + q => !q.NullQuery() || !q.ConditionlessQuery() || !q.ConditionlessQuery() || !q.ConditionlessQuery() +); +---- + +[source,csharp] +---- +foreach (var i in Enumerable.Range(0, 100)) + container |= Query; + +LotsOfNots(container); +---- + +[source,csharp] +---- +var container = new QueryContainer(); +---- + +[source,csharp] +---- +foreach (var i in Enumerable.Range(0, 100)) + container |= Query; + +LotsOfNots(container); +---- + +[source,csharp] +---- +lotsOfNots.Should().NotBeNull(); + +lotsOfNots.Bool.Should().NotBeNull(); + +lotsOfNots.Bool.Should.Should().NotBeEmpty().And.HaveCount(100); +---- + diff --git a/docs/asciidoc/query-dsl/bool-dsl/operators/or-operator-usage.asciidoc b/docs/asciidoc/query-dsl/bool-dsl/operators/or-operator-usage.asciidoc new file mode 100644 index 00000000000..cfc89d83f77 --- /dev/null +++ b/docs/asciidoc/query-dsl/bool-dsl/operators/or-operator-usage.asciidoc @@ -0,0 +1,115 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[or-operator-usage]] +== Or Operator Usage + +[source,csharp] +---- +var lotsOfOrs = Enumerable.Range(0, 100).Aggregate(new QueryContainer(), (q, c) => q || Query, q => q); + +LotsOfOrs(lotsOfOrs); +---- + +[source,csharp] +---- +QueryContainer container = null; +---- + +[source,csharp] +---- +ReturnsBool(Query || Query, q => q.Query() || q.Query(), b => +{ + b.Should.Should().NotBeEmpty().And.HaveCount(2); + b.Must.Should().BeNull(); + 
b.MustNot.Should().BeNull(); + b.Filter.Should().BeNull(); +}); +b.Should.Should().NotBeEmpty().And.HaveCount(2); +b.Must.Should().BeNull(); +b.MustNot.Should().BeNull(); +b.Filter.Should().BeNull(); +ReturnsBool(Query || Query || ConditionlessQuery, q => q.Query() || q.Query() || q.ConditionlessQuery(), b => +{ + b.Should.Should().NotBeEmpty().And.HaveCount(2); + b.Must.Should().BeNull(); + b.MustNot.Should().BeNull(); + b.Filter.Should().BeNull(); +}); +b.Should.Should().NotBeEmpty().And.HaveCount(2); +b.Must.Should().BeNull(); +b.MustNot.Should().BeNull(); +b.Filter.Should().BeNull(); +ReturnsBool(Query || Query || ConditionlessQuery, q => q.Query() || q.Query() || q.ConditionlessQuery(), b => +{ + b.Should.Should().NotBeEmpty().And.HaveCount(2); + b.Must.Should().BeNull(); + b.MustNot.Should().BeNull(); + b.Filter.Should().BeNull(); +}); +b.Should.Should().NotBeEmpty().And.HaveCount(2); +b.Must.Should().BeNull(); +b.MustNot.Should().BeNull(); +b.Filter.Should().BeNull(); +ReturnsSingleQuery(Query || ConditionlessQuery, q => q.Query() || q.ConditionlessQuery(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery(ConditionlessQuery || Query, q => q.ConditionlessQuery() || q.Query(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery(Query || NullQuery, q => q.Query() || q.NullQuery(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery(NullQuery || Query, q=> q.NullQuery() || q.Query(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery(ConditionlessQuery || ConditionlessQuery || ConditionlessQuery || Query, + q => q.ConditionlessQuery() || q.ConditionlessQuery() || q.ConditionlessQuery() || q.Query(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsSingleQuery( + NullQuery || NullQuery || ConditionlessQuery || Query, + q=>q.NullQuery() || q.NullQuery() || q.ConditionlessQuery() || q.Query(), + c => c.Term.Value.Should().NotBeNull()); +ReturnsNull(NullQuery || ConditionlessQuery, q=> q.NullQuery() || 
q.ConditionlessQuery()); +ReturnsNull(ConditionlessQuery || NullQuery, q=>q.ConditionlessQuery() || q.NullQuery()); +ReturnsNull(ConditionlessQuery || ConditionlessQuery, q=>q.ConditionlessQuery() || q.ConditionlessQuery()); +ReturnsNull( + ConditionlessQuery || ConditionlessQuery || ConditionlessQuery || ConditionlessQuery, + q=>q.ConditionlessQuery() || q.ConditionlessQuery() || q.ConditionlessQuery() || q.ConditionlessQuery() + +); +ReturnsNull( + NullQuery || ConditionlessQuery || ConditionlessQuery || ConditionlessQuery, + q=>q.NullQuery() || q.ConditionlessQuery() || q.ConditionlessQuery() || q.ConditionlessQuery() +); +---- + +[source,csharp] +---- +foreach(var i in Enumerable.Range(0, 100)) + container |= Query; + +LotsOfOrs(container); +---- + +[source,csharp] +---- +var container = new QueryContainer(); +---- + +[source,csharp] +---- +foreach(var i in Enumerable.Range(0, 100)) + container |= Query; + +LotsOfOrs(container); +---- + +[source,csharp] +---- +lotsOfOrs.Should().NotBeNull(); + +lotsOfOrs.Bool.Should().NotBeNull(); + +lotsOfOrs.Bool.Should.Should().NotBeEmpty().And.HaveCount(100); +---- + diff --git a/docs/asciidoc/query-dsl/bool-dsl/operators/unary-add-operator-usage.asciidoc b/docs/asciidoc/query-dsl/bool-dsl/operators/unary-add-operator-usage.asciidoc new file mode 100644 index 00000000000..509fe7943f7 --- /dev/null +++ b/docs/asciidoc/query-dsl/bool-dsl/operators/unary-add-operator-usage.asciidoc @@ -0,0 +1,134 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[unary-add-operator-usage]] +== Unary Add Operator Usage + +[source,csharp] +---- +ReturnsBool(+Query && +Query, q => +q.Query() && +q.Query(), b => +{ + b.Filter.Should().NotBeEmpty().And.HaveCount(2); + b.Must.Should().BeNull(); + b.Should.Should().BeNull(); + b.MustNot.Should().BeNull(); +}); +b.Filter.Should().NotBeEmpty().And.HaveCount(2); 
+b.Must.Should().BeNull(); +b.Should.Should().BeNull(); +b.MustNot.Should().BeNull(); +ReturnsBool(+Query || +Query || +ConditionlessQuery, q => +q.Query() || +q.Query() || +q.ConditionlessQuery(), b => +{ + b.Should.Should().NotBeEmpty().And.HaveCount(2); + b.Must.Should().BeNull(); + b.MustNot.Should().BeNull(); + b.MustNot.Should().BeNull(); + foreach (IQueryContainer q in b.Should) + { + q.Bool.Should().NotBeNull(); + q.Bool.Filter.Should().NotBeEmpty().And.HaveCount(1); + } +}); +b.Should.Should().NotBeEmpty().And.HaveCount(2); +b.Must.Should().BeNull(); +b.MustNot.Should().BeNull(); +b.MustNot.Should().BeNull(); + +foreach (IQueryContainer q in b.Should) +{ + q.Bool.Should().NotBeNull(); + q.Bool.Filter.Should().NotBeEmpty().And.HaveCount(1); +} +---- + +[source,csharp] +---- +var lotsOfUnaryAdds = Enumerable.Range(0, 100).Aggregate(new QueryContainer(), (q, c) => q && +Query, q => q); + +LotsOfUnaryAdds(lotsOfUnaryAdds); +---- + +[source,csharp] +---- +QueryContainer container = null; +---- + +[source,csharp] +---- +ReturnsSingleQuery(+Query || +ConditionlessQuery, q => +q.Query() || +q.ConditionlessQuery(), + c => c.Bool.Filter.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery(+ConditionlessQuery || +Query, q => +q.ConditionlessQuery() || +q.Query(), + c => c.Bool.Filter.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery(+Query || +NullQuery, q => +q.Query() || +q.NullQuery(), + c => c.Bool.Filter.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery(+NullQuery && +Query, q => +q.NullQuery() && +q.Query(), + c => c.Bool.Filter.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery(+ConditionlessQuery || +ConditionlessQuery && +ConditionlessQuery || +Query, + q => +q.ConditionlessQuery() || +q.ConditionlessQuery() && +q.ConditionlessQuery() || +q.Query(), + c => c.Bool.Filter.Should().NotBeNull().And.HaveCount(1)); +ReturnsSingleQuery( + +NullQuery || +NullQuery || +ConditionlessQuery || +Query, + q => +q.NullQuery() || 
+q.NullQuery() || +q.ConditionlessQuery() || +q.Query(), + c => c.Bool.Filter.Should().NotBeNull()); +ReturnsNull(+NullQuery || +ConditionlessQuery, q => +q.NullQuery() || +q.ConditionlessQuery()); +ReturnsNull(+ConditionlessQuery && +NullQuery, q => +q.ConditionlessQuery() && +q.NullQuery()); +ReturnsNull(+ConditionlessQuery || +ConditionlessQuery, q => +q.ConditionlessQuery() || +q.ConditionlessQuery()); +ReturnsNull( + +ConditionlessQuery || +ConditionlessQuery || +ConditionlessQuery || +ConditionlessQuery, + q => +q.ConditionlessQuery() || +q.ConditionlessQuery() || +q.ConditionlessQuery() || +q.ConditionlessQuery() + +); +ReturnsNull( + +NullQuery || +ConditionlessQuery || +ConditionlessQuery || +ConditionlessQuery, + q => +q.NullQuery() || +q.ConditionlessQuery() || +q.ConditionlessQuery() || +q.ConditionlessQuery() +); +---- + +[source,csharp] +---- +foreach (var i in Enumerable.Range(0, 100)) + container &= +Query; + +LotsOfUnaryAdds(container); +---- + +[source,csharp] +---- +var container = new QueryContainer(); +---- + +[source,csharp] +---- +foreach (var i in Enumerable.Range(0, 100)) + container &= +Query; + +LotsOfUnaryAdds(container); +---- + +[source,csharp] +---- +lotsOfUnaryAdds.Should().NotBeNull(); + +lotsOfUnaryAdds.Bool.Should().NotBeNull(); + +lotsOfUnaryAdds.Bool.Filter.Should().NotBeEmpty().And.HaveCount(100); +---- + +[source,csharp] +---- +var container = new QueryContainer(); +---- + +[source,csharp] +---- +foreach (var i in Enumerable.Range(0, 100)) + container |= +Query; + +var c = container as IQueryContainer; + +c.Bool.Should.Should().NotBeEmpty().And.HaveCount(100); +---- + diff --git a/docs/asciidoc/query-dsl/compound/and/and-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/and/and-query-usage.asciidoc new file mode 100644 index 00000000000..936a2d2d602 --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/and/and-query-usage.asciidoc @@ -0,0 +1,62 @@ +:ref_current: 
https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[and-query-usage]] +== And Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.And(c => c + .Name("named_query") + .Boost(1.1) + .Filters( + qq => qq.MatchAll(m => m.Name("query1")), + qq => qq.MatchAll(m => m.Name("query2")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new AndQuery() +{ + Name = "named_query", + Boost = 1.1, + Filters = new QueryContainer[] { + new MatchAllQuery() { Name = "query1" }, + new MatchAllQuery() { Name = "query2" }, + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "and": { + "_name": "named_query", + "boost": 1.1, + "filters": [ + { + "match_all": { + "_name": "query1" + } + }, + { + "match_all": { + "_name": "query2" + } + } + ] + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/bool/bool-dsl-complex-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/bool/bool-dsl-complex-query-usage.asciidoc new file mode 100644 index 00000000000..e4a2111864e --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/bool/bool-dsl-complex-query-usage.asciidoc @@ -0,0 +1,191 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[bool-dsl-complex-query-usage]] +== Bool Dsl Complex Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q.Query() && q.Query() +//second bool +|| ( + //complex nested bool + (+q.Query() || +q.Query() || !q.Query() && (!q.Query() && !q.ConditionlessQuery())) + // simple nested or + && (q.Query() || q.Query() || q.Query()) + //all conditionless bool + && (q.NullQuery() || +q.ConditionlessQuery() || !q.ConditionlessQuery()) + // actual bool query + && (base.QueryFluent(q))) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- 
+Query && Query +//second bool +|| ( + //complex nested bool + (+Query || +Query || !Query && (!Query && !ConditionlessQuery)) + // simple nested or + && (Query || Query || Query) + //all conditionless bool + && (NullQuery || +ConditionlessQuery || !ConditionlessQuery) + // actual bool query + && (base.QueryInitializer)) +---- + +[source,javascript] +.Example json output +---- +{ + "bool": { + "should": [ + { + "bool": { + "must": [ + { + "term": { + "x": { + "value": "y" + } + } + }, + { + "term": { + "x": { + "value": "y" + } + } + } + ] + } + }, + { + "bool": { + "must": [ + { + "bool": { + "must": [ + { + "bool": { + "should": [ + { + "bool": { + "filter": [ + { + "term": { + "x": { + "value": "y" + } + } + } + ] + } + }, + { + "bool": { + "filter": [ + { + "term": { + "x": { + "value": "y" + } + } + } + ] + } + }, + { + "bool": { + "must_not": [ + { + "term": { + "x": { + "value": "y" + } + } + }, + { + "term": { + "x": { + "value": "y" + } + } + } + ] + } + } + ] + } + }, + { + "bool": { + "should": [ + { + "term": { + "x": { + "value": "y" + } + } + }, + { + "term": { + "x": { + "value": "y" + } + } + }, + { + "term": { + "x": { + "value": "y" + } + } + } + ] + } + } + ] + } + }, + { + "bool": { + "must": [ + { + "match_all": {} + } + ], + "must_not": [ + { + "match_all": {} + } + ], + "should": [ + { + "match_all": {} + } + ], + "filter": [ + { + "match_all": {} + } + ], + "minimum_should_match": 1, + "boost": 2.0 + } + } + ] + } + } + ] + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/bool/bool-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/bool/bool-query-usage.asciidoc new file mode 100644 index 00000000000..6fd39a3da8b --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/bool/bool-query-usage.asciidoc @@ -0,0 +1,69 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[bool-query-usage]] +== Bool Query 
Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Bool(b => b + .MustNot(m => m.MatchAll()) + .Should(m => m.MatchAll()) + .Must(m => m.MatchAll()) + .Filter(f => f.MatchAll()) + .MinimumShouldMatch(1) + .Boost(2)) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new BoolQuery() +{ + MustNot = new QueryContainer[] { new MatchAllQuery() }, + Should = new QueryContainer[] { new MatchAllQuery() }, + Must = new QueryContainer[] { new MatchAllQuery() }, + Filter = new QueryContainer[] { new MatchAllQuery() }, + MinimumShouldMatch = 1, + Boost = 2 +} +---- + +[source,javascript] +.Example json output +---- +{ + "bool": { + "must": [ + { + "match_all": {} + } + ], + "must_not": [ + { + "match_all": {} + } + ], + "should": [ + { + "match_all": {} + } + ], + "filter": [ + { + "match_all": {} + } + ], + "minimum_should_match": 1, + "boost": 2.0 + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/boosting/boosting-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/boosting/boosting-query-usage.asciidoc new file mode 100644 index 00000000000..8f9932976ef --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/boosting/boosting-query-usage.asciidoc @@ -0,0 +1,59 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[boosting-query-usage]] +== Boosting Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Boosting(c => c + .Name("named_query") + .Boost(1.1) + .Positive(qq => qq.MatchAll(m => m.Name("filter"))) + .Negative(qq => qq.MatchAll(m => m.Name("query"))) + .NegativeBoost(1.12) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new BoostingQuery() +{ + Name = "named_query", + Boost = 1.1, + PositiveQuery = new MatchAllQuery { Name ="filter" }, + NegativeQuery= new MatchAllQuery() { Name = "query" }, + NegativeBoost = 1.12 +} +---- + +[source,javascript] 
+.Example json output +---- +{ + "boosting": { + "_name": "named_query", + "boost": 1.1, + "negative": { + "match_all": { + "_name": "query" + } + }, + "negative_boost": 1.12, + "positive": { + "match_all": { + "_name": "filter" + } + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/constant-score/constant-score-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/constant-score/constant-score-query-usage.asciidoc new file mode 100644 index 00000000000..e06551baddb --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/constant-score/constant-score-query-usage.asciidoc @@ -0,0 +1,49 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[constant-score-query-usage]] +== Constant Score Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.ConstantScore(c => c + .Name("named_query") + .Boost(1.1) + .Filter(qq => qq.MatchAll(m => m.Name("filter"))) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new ConstantScoreQuery() +{ + Name = "named_query", + Boost = 1.1, + Filter = new MatchAllQuery { Name = "filter" }, +} +---- + +[source,javascript] +.Example json output +---- +{ + "constant_score": { + "_name": "named_query", + "boost": 1.1, + "filter": { + "match_all": { + "_name": "filter" + } + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/dismax/dismax-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/dismax/dismax-query-usage.asciidoc new file mode 100644 index 00000000000..54466b1a0cb --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/dismax/dismax-query-usage.asciidoc @@ -0,0 +1,65 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[dismax-query-usage]] +== Dismax Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q 
+.DisMax(c => c + .Name("named_query") + .Boost(1.1) + .TieBreaker(1.11) + .Queries( + qq => qq.MatchAll(m => m.Name("query1")), + qq => qq.MatchAll(m => m.Name("query2")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new DisMaxQuery() +{ + Name = "named_query", + Boost = 1.1, + TieBreaker = 1.11, + Queries = new QueryContainer[] { + new MatchAllQuery() { Name = "query1" }, + new MatchAllQuery() { Name = "query2" }, + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "dis_max": { + "_name": "named_query", + "boost": 1.1, + "queries": [ + { + "match_all": { + "_name": "query1" + } + }, + { + "match_all": { + "_name": "query2" + } + } + ], + "tie_breaker": 1.11 + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/filtered/filtered-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/filtered/filtered-query-usage.asciidoc new file mode 100644 index 00000000000..73c5c33fc59 --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/filtered/filtered-query-usage.asciidoc @@ -0,0 +1,56 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[filtered-query-usage]] +== Filtered Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Filtered(c => c + .Name("named_query") + .Boost(1.1) + .Filter(qq => qq.MatchAll(m => m.Name("filter"))) + .Query(qq => qq.MatchAll(m => m.Name("query"))) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new FilteredQuery() +{ + Name = "named_query", + Boost = 1.1, + Filter = new MatchAllQuery { Name ="filter" }, + Query = new MatchAllQuery() { Name = "query" }, +} +---- + +[source,javascript] +.Example json output +---- +{ + "filtered": { + "_name": "named_query", + "boost": 1.1, + "filter": { + "match_all": { + "_name": "filter" + } + }, + "query": { + "match_all": { + "_name": "query" + } + } + } +} +---- + diff --git 
a/docs/asciidoc/query-dsl/compound/full-text/common-terms/common-terms-usage.asciidoc b/docs/asciidoc/query-dsl/compound/full-text/common-terms/common-terms-usage.asciidoc new file mode 100644 index 00000000000..52f8c6fd1cb --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/full-text/common-terms/common-terms-usage.asciidoc @@ -0,0 +1,67 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[common-terms-usage]] +== Common Terms Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.CommonTerms(c => c + .Field(p => p.Description) + .Analyzer("standard") + .Boost(1.1) + .CutoffFrequency(0.001) + .DisableCoord() + .HighFrequencyOperator(Operator.And) + .LowFrequencyOperator(Operator.Or) + .MinimumShouldMatch(1) + .Name("named_query") + .Query("nelly the elephant not as a") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new CommonTermsQuery() +{ + Field = Field(p => p.Description), + Analyzer = "standard", + Boost = 1.1, + CutoffFrequency = 0.001, + DisableCoord = true, + HighFrequencyOperator = Operator.And, + LowFrequencyOperator = Operator.Or, + MinimumShouldMatch = 1, + Name = "named_query", + Query = "nelly the elephant not as a" +} +---- + +[source,javascript] +.Example json output +---- +{ + "common": { + "description": { + "_name": "named_query", + "boost": 1.1, + "query": "nelly the elephant not as a", + "cutoff_frequency": 0.001, + "low_freq_operator": "or", + "high_freq_operator": "and", + "minimum_should_match": 1, + "analyzer": "standard", + "disable_coord": true + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/full-text/match/match-phrase-prefix-usage.asciidoc b/docs/asciidoc/query-dsl/compound/full-text/match/match-phrase-prefix-usage.asciidoc new file mode 100644 index 00000000000..cadfeba1b29 --- /dev/null +++ 
b/docs/asciidoc/query-dsl/compound/full-text/match/match-phrase-prefix-usage.asciidoc @@ -0,0 +1,83 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[match-phrase-prefix-usage]] +== Match Phrase Prefix Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.MatchPhrasePrefix(c => c + .Field(p => p.Description) + .Analyzer("standard") + .Boost(1.1) + .CutoffFrequency(0.001) + .Query("hello worl") + .Fuzziness(Fuzziness.Auto) + .Lenient() + .FuzzyTranspositions() + .MaxExpansions(2) + .MinimumShouldMatch(2) + .PrefixLength(2) + .Operator(Operator.Or) + .FuzzyRewrite(RewriteMultiTerm.ConstantScoreBoolean) + .Slop(2) + .Name("named_query") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new MatchPhrasePrefixQuery +{ + Field = Field(p => p.Description), + Analyzer = "standard", + Boost = 1.1, + Name = "named_query", + CutoffFrequency = 0.001, + Query = "hello worl", + Fuzziness = Fuzziness.Auto, + FuzzyTranspositions = true, + MinimumShouldMatch = 2, + FuzzyRewrite = RewriteMultiTerm.ConstantScoreBoolean, + MaxExpansions = 2, + Slop = 2, + Lenient = true, + Operator = Operator.Or, + PrefixLength = 2 +} +---- + +[source,javascript] +.Example json output +---- +{ + "match": { + "description": { + "_name": "named_query", + "boost": 1.1, + "query": "hello worl", + "analyzer": "standard", + "fuzzy_rewrite": "constant_score_boolean", + "fuzziness": "AUTO", + "fuzzy_transpositions": true, + "cutoff_frequency": 0.001, + "prefix_length": 2, + "max_expansions": 2, + "slop": 2, + "lenient": true, + "minimum_should_match": 2, + "operator": "or", + "type": "phrase_prefix" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/full-text/match/match-phrase-usage.asciidoc b/docs/asciidoc/query-dsl/compound/full-text/match/match-phrase-usage.asciidoc new file mode 100644 index 00000000000..850077abb54 --- 
/dev/null +++ b/docs/asciidoc/query-dsl/compound/full-text/match/match-phrase-usage.asciidoc @@ -0,0 +1,83 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[match-phrase-usage]] +== Match Phrase Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.MatchPhrase(c => c + .Field(p => p.Description) + .Analyzer("standard") + .Boost(1.1) + .CutoffFrequency(0.001) + .Query("hello world") + .Fuzziness(Fuzziness.Auto) + .Lenient() + .FuzzyTranspositions() + .MaxExpansions(2) + .MinimumShouldMatch(2) + .PrefixLength(2) + .Operator(Operator.Or) + .FuzzyRewrite(RewriteMultiTerm.ConstantScoreBoolean) + .Slop(2) + .Name("named_query") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new MatchPhraseQuery +{ + Field = Field(p=>p.Description), + Analyzer = "standard", + Boost = 1.1, + Name = "named_query", + CutoffFrequency = 0.001, + Query = "hello world", + Fuzziness = Fuzziness.Auto, + FuzzyTranspositions = true, + MinimumShouldMatch = 2, + FuzzyRewrite = RewriteMultiTerm.ConstantScoreBoolean, + MaxExpansions = 2, + Slop = 2, + Lenient = true, + Operator = Operator.Or, + PrefixLength = 2 +} +---- + +[source,javascript] +.Example json output +---- +{ + "match": { + "description": { + "_name": "named_query", + "boost": 1.1, + "query": "hello world", + "analyzer": "standard", + "fuzzy_rewrite": "constant_score_boolean", + "fuzziness": "AUTO", + "fuzzy_transpositions": true, + "cutoff_frequency": 0.001, + "prefix_length": 2, + "max_expansions": 2, + "slop": 2, + "lenient": true, + "minimum_should_match": 2, + "operator": "or", + "type": "phrase" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/full-text/match/match-usage.asciidoc b/docs/asciidoc/query-dsl/compound/full-text/match/match-usage.asciidoc new file mode 100644 index 00000000000..174b2c7f7dc --- /dev/null +++ 
b/docs/asciidoc/query-dsl/compound/full-text/match/match-usage.asciidoc @@ -0,0 +1,82 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[match-usage]] +== Match Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Match(c => c + .Field(p => p.Description) + .Analyzer("standard") + .Boost(1.1) + .CutoffFrequency(0.001) + .Query("hello world") + .Fuzziness(Fuzziness.Auto) + .Lenient() + .FuzzyTranspositions() + .MaxExpansions(2) + .MinimumShouldMatch(2) + .PrefixLength(2) + .Operator(Operator.Or) + .FuzzyRewrite(RewriteMultiTerm.ConstantScoreBoolean) + .Slop(2) + .Name("named_query") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new MatchQuery +{ + Field = Field(p=>p.Description), + Analyzer = "standard", + Boost = 1.1, + Name = "named_query", + CutoffFrequency = 0.001, + Query = "hello world", + Fuzziness = Fuzziness.Auto, + FuzzyTranspositions = true, + MinimumShouldMatch = 2, + FuzzyRewrite = RewriteMultiTerm.ConstantScoreBoolean, + MaxExpansions = 2, + Slop = 2, + Lenient = true, + Operator = Operator.Or, + PrefixLength = 2 +} +---- + +[source,javascript] +.Example json output +---- +{ + "match": { + "description": { + "_name": "named_query", + "boost": 1.1, + "query": "hello world", + "analyzer": "standard", + "fuzzy_rewrite": "constant_score_boolean", + "fuzziness": "AUTO", + "fuzzy_transpositions": true, + "cutoff_frequency": 0.001, + "prefix_length": 2, + "max_expansions": 2, + "slop": 2, + "lenient": true, + "minimum_should_match": 2, + "operator": "or" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/full-text/multi-match/multi-match-usage.asciidoc b/docs/asciidoc/query-dsl/compound/full-text/multi-match/multi-match-usage.asciidoc new file mode 100644 index 00000000000..af365d8a1a7 --- /dev/null +++ 
b/docs/asciidoc/query-dsl/compound/full-text/multi-match/multi-match-usage.asciidoc @@ -0,0 +1,124 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[multi-match-usage]] +== Multi Match Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.MultiMatch(c => c + .Fields(f => f.Field(p=>p.Description).Field("myOtherField")) + .Query("hello world") + .Analyzer("standard") + .Boost(1.1) + .Slop(2) + .Fuzziness(Fuzziness.Auto) + .PrefixLength(2) + .MaxExpansions(2) + .Operator(Operator.Or) + .MinimumShouldMatch(2) + .FuzzyRewrite(RewriteMultiTerm.ConstantScoreBoolean) + .TieBreaker(1.1) + .CutoffFrequency(0.001) + .Lenient() + .ZeroTermsQuery(ZeroTermsQuery.All) + .Name("named_query") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new MultiMatchQuery +{ + Fields = Field(p=>p.Description).And("myOtherField"), + Query = "hello world", + Analyzer = "standard", + Boost = 1.1, + Slop = 2, + Fuzziness = Fuzziness.Auto, + PrefixLength = 2, + MaxExpansions = 2, + Operator = Operator.Or, + MinimumShouldMatch = 2, + FuzzyRewrite = RewriteMultiTerm.ConstantScoreBoolean, + TieBreaker = 1.1, + CutoffFrequency = 0.001, + Lenient = true, + ZeroTermsQuery = ZeroTermsQuery.All, + Name = "named_query", +} +---- + +[source,javascript] +.Example json output +---- +{ + "multi_match": { + "_name": "named_query", + "boost": 1.1, + "query": "hello world", + "analyzer": "standard", + "fuzzy_rewrite": "constant_score_boolean", + "fuzziness": "AUTO", + "cutoff_frequency": 0.001, + "prefix_length": 2, + "max_expansions": 2, + "slop": 2, + "lenient": true, + "tie_breaker": 1.1, + "minimum_should_match": 2, + "operator": "or", + "fields": [ + "description", + "myOtherField" + ], + "zero_terms_query": "all" + } +} +---- + +=== Fluent DSL Example + +[source,csharp] +---- +q +.MultiMatch(c => c + //.Fields(f => 
f.Field(p=>p.Description, 2.2).Field("myOtherField^0.3")) + .Fields(Field(p=>p.Description, 2.2).And("myOtherField^0.3")) + .Query("hello world") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new MultiMatchQuery +{ + Fields = Field(p=>p.Description, 2.2).And("myOtherField^0.3"), + Query = "hello world", +} +---- + +[source,javascript] +.Example json output +---- +{ + "multi_match": { + "query": "hello world", + "fields": [ + "description^2.2", + "myOtherField^0.3" + ] + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/full-text/query-string/query-string-usage.asciidoc b/docs/asciidoc/query-dsl/compound/full-text/query-string/query-string-usage.asciidoc new file mode 100644 index 00000000000..165e0db6a4d --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/full-text/query-string/query-string-usage.asciidoc @@ -0,0 +1,120 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[query-string-usage]] +== Query String Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.QueryString(c => c + .Name("named_query") + .Boost(1.1) + .Fields(f => f.Field(p=>p.Description).Field("myOtherField")) + .Query("hello world") + .DefaultField(p=>p.Description) + .DefaultOperator(Operator.Or) + .Analyzer("standard") + .QuoteAnalyzer("quote-an") + .AllowLeadingWildcard() + .AutoGeneratePhraseQueries() + .MaximumDeterminizedStates(2) + .LowercaseExpendedTerms() + .EnablePositionIncrements() + .Escape() + .UseDisMax() + .FuzzyPrefixLength(2) + .FuzzyMaxExpansions(3) + .FuzzyRewrite(RewriteMultiTerm.ConstantScore) + .Rewrite(RewriteMultiTerm.ConstantScore) + .Fuziness(Fuzziness.Auto) + .TieBreaker(1.2) + .AnalyzeWildcard() + .MinimumShouldMatch(2) + .QuoteFieldSuffix("'") + .Lenient() + .Locale("en_US") + .Timezone("root") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new 
QueryStringQuery +{ + Fields = Field(p=>p.Description).And("myOtherField"), + Boost = 1.1, + Name = "named_query", + Query = "hello world", + DefaultField = Field(p=>p.Description), + DefaultOperator = Operator.Or, + Analyzer = "standard", + QuoteAnalyzer = "quote-an", + AllowLeadingWildcard = true, + AutoGeneratePhraseQueries = true, + MaximumDeterminizedStates = 2, + LowercaseExpendedTerms = true, + EnablePositionIncrements = true, + Escape = true, + UseDisMax = true, + FuzzyPrefixLength = 2, + FuzzyMaxExpansions = 3, + FuzzyRewrite = RewriteMultiTerm.ConstantScore, + Rewrite = RewriteMultiTerm.ConstantScore, + Fuzziness = Fuzziness.Auto, + TieBreaker = 1.2, + AnalyzeWildcard = true, + MinimumShouldMatch = 2, + QuoteFieldSuffix = "'", + Lenient = true, + Locale = "en_US", + Timezone = "root" +} +---- + +[source,javascript] +.Example json output +---- +{ + "query_string": { + "_name": "named_query", + "boost": 1.1, + "query": "hello world", + "default_field": "description", + "default_operator": "or", + "analyzer": "standard", + "quote_analyzer": "quote-an", + "allow_leading_wildcard": true, + "lowercase_expanded_terms": true, + "enable_position_increments": true, + "fuzzy_max_expansions": 3, + "fuziness": "AUTO", + "fuzzy_prefix_length": 2, + "analyze_wildcard": true, + "auto_generate_phrase_queries": true, + "max_determinized_states": 2, + "minimum_should_match": 2, + "lenient": true, + "locale": "en_US", + "time_zone": "root", + "fields": [ + "description", + "myOtherField" + ], + "use_dis_max": true, + "tie_breaker": 1.2, + "rewrite": "constant_score", + "fuzzy_rewrite": "constant_score", + "quote_field_suffix": "'", + "escape": true + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/full-text/simple-query-string/simple-query-string-usage.asciidoc b/docs/asciidoc/query-dsl/compound/full-text/simple-query-string/simple-query-string-usage.asciidoc new file mode 100644 index 00000000000..b3236c9c12a --- /dev/null +++ 
b/docs/asciidoc/query-dsl/compound/full-text/simple-query-string/simple-query-string-usage.asciidoc @@ -0,0 +1,75 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[simple-query-string-usage]] +== Simple Query String Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.SimpleQueryString(c => c + .Name("named_query") + .Boost(1.1) + .Fields(f => f.Field(p=>p.Description).Field("myOtherField")) + .Query("hello world") + .Analyzer("standard") + .DefaultOperator(Operator.Or) + .Flags(SimpleQueryStringFlags.And|SimpleQueryStringFlags.Near) + .Locale("en_US") + .LowercaseExpendedTerms() + .Lenient() + .AnalyzeWildcard() + .MinimumShouldMatch("30%") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SimpleQueryStringQuery +{ + Name = "named_query", + Boost = 1.1, + Fields = Field(p=>p.Description).And("myOtherField"), + Query = "hello world", + Analyzer = "standard", + DefaultOperator = Operator.Or, + Flags = SimpleQueryStringFlags.And|SimpleQueryStringFlags.Near, + Locale = "en_US", + LowercaseExpendedTerms = true, + Lenient = true, + AnalyzeWildcard = true, + MinimumShouldMatch = "30%" +} +---- + +[source,javascript] +.Example json output +---- +{ + "simple_query_string": { + "_name": "named_query", + "boost": 1.1, + "fields": [ + "description", + "myOtherField" + ], + "query": "hello world", + "analyzer": "standard", + "default_operator": "or", + "flags": "AND|NEAR", + "locale": "en_US", + "lowercase_expanded_terms": true, + "lenient": true, + "analyze_wildcard": true, + "minimum_should_match": "30%" + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/function-score/function-score-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/function-score/function-score-query-usage.asciidoc new file mode 100644 index 00000000000..b6a09ba2a14 --- /dev/null +++ 
b/docs/asciidoc/query-dsl/compound/function-score/function-score-query-usage.asciidoc @@ -0,0 +1,144 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[function-score-query-usage]] +== Function Score Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.FunctionScore(c => c + .Name("named_query") + .Boost(1.1) + .Query(qq => qq.MatchAll()) + .BoostMode(FunctionBoostMode.Multiply) + .ScoreMode(FunctionScoreMode.Sum) + .MaxBoost(20.0) + .MinScore(1.0) + .Functions(f => f + .Exponential(b => b.Field(p => p.NumberOfCommits).Decay(0.5).Origin(1.0).Scale(0.1).Weight(2.1)) + .GaussDate(b => b.Field(p => p.LastActivity).Origin(DateMath.Now).Decay(0.5).Scale("1d")) + .LinearGeoLocation(b => b.Field(p => p.Location).Origin(new GeoLocation(70, -70)).Scale(Distance.Miles(1)).MultiValueMode(MultiValueMode.Average)) + .FieldValueFactor(b => b.Field("x").Factor(1.1).Missing(0.1).Modifier(FieldValueFactorModifier.Ln)) + .RandomScore(1337) + .RandomScore("randomstring") + .Weight(1.0) + .ScriptScore(ss => ss.Script(s => s.File("x"))) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new FunctionScoreQuery() +{ + Name = "named_query", + Boost = 1.1, + Query = new MatchAllQuery { }, + BoostMode = FunctionBoostMode.Multiply, + ScoreMode = FunctionScoreMode.Sum, + MaxBoost = 20.0, + MinScore = 1.0, + Functions = new List + { + new ExponentialDecayFunction { Origin = 1.0, Decay = 0.5, Field = Field(p=>p.NumberOfCommits), Scale = 0.1, Weight = 2.1 }, + new GaussDateDecayFunction { Origin = DateMath.Now, Field = Field(p=>p.LastActivity), Decay = 0.5, Scale = TimeSpan.FromDays(1) }, + new LinearGeoDecayFunction { Origin = new GeoLocation(70, -70), Field = Field(p=>p.Location), Scale = Distance.Miles(1), MultiValueMode = MultiValueMode.Average }, + new FieldValueFactorFunction + { + Field = "x", Factor = 1.1, 
Missing = 0.1, Modifier = FieldValueFactorModifier.Ln + }, + new RandomScoreFunction { Seed = 1337 }, + new RandomScoreFunction { Seed = "randomstring" }, + new WeightFunction { Weight = 1.0}, + new ScriptScoreFunction { Script = new ScriptQuery { File = "x" } } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "function_score": { + "_name": "named_query", + "boost": 1.1, + "boost_mode": "multiply", + "functions": [ + { + "exp": { + "numberOfCommits": { + "origin": 1.0, + "scale": 0.1, + "decay": 0.5 + } + }, + "weight": 2.1 + }, + { + "gauss": { + "lastActivity": { + "origin": "now", + "scale": "1d", + "decay": 0.5 + } + } + }, + { + "linear": { + "location": { + "origin": { + "lat": 70.0, + "lon": -70.0 + }, + "scale": "1.0mi" + }, + "multi_value_mode": "avg" + } + }, + { + "field_value_factor": { + "field": "x", + "factor": 1.1, + "missing": 0.1, + "modifier": "ln" + } + }, + { + "random_score": { + "seed": 1337 + } + }, + { + "random_score": { + "seed": "randomstring" + } + }, + { + "weight": 1.0 + }, + { + "script_score": { + "script": { + "file": "x" + } + } + } + ], + "max_boost": 20.0, + "min_score": 1.0, + "query": { + "match_all": {} + }, + "score_mode": "sum" + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/indices/indices-no-match-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/indices/indices-no-match-query-usage.asciidoc new file mode 100644 index 00000000000..0625dd68a8f --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/indices/indices-no-match-query-usage.asciidoc @@ -0,0 +1,55 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[indices-no-match-query-usage]] +== Indices No Match Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Indices(c => c + .Name("named_query") + .Boost(1.1) + .Indices(Nest.Indices.All) + .Query(qq => qq.MatchAll()) + 
.NoMatchQuery(NoMatchShortcut.All) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new IndicesQuery() +{ + Name = "named_query", + Boost = 1.1, + Indices = Nest.Indices.All, + Query = new MatchAllQuery(), + NoMatchQuery = new NoMatchQueryContainer { Shortcut = NoMatchShortcut.All } +} +---- + +[source,javascript] +.Example json output +---- +{ + "indices": { + "_name": "named_query", + "boost": 1.1, + "indices": [ + "_all" + ], + "no_match_query": "all", + "query": { + "match_all": {} + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/indices/indices-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/indices/indices-query-usage.asciidoc new file mode 100644 index 00000000000..18fb2e392f5 --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/indices/indices-query-usage.asciidoc @@ -0,0 +1,60 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[indices-query-usage]] +== Indices Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Indices(c => c + .Name("named_query") + .Boost(1.1) + .Indices(Index()) + .Query(qq => qq.MatchAll()) + .NoMatchQuery(qq => qq.MatchAll(m => m.Name("no_match"))) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new IndicesQuery() +{ + Name = "named_query", + Boost = 1.1, + Indices = Index(), + Query = new MatchAllQuery(), + NoMatchQuery = new MatchAllQuery { Name ="no_match" } + +} +---- + +[source,javascript] +.Example json output +---- +{ + "indices": { + "_name": "named_query", + "boost": 1.1, + "indices": [ + "project" + ], + "no_match_query": { + "match_all": { + "_name": "no_match" + } + }, + "query": { + "match_all": {} + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/limit/limit-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/limit/limit-query-usage.asciidoc new file mode 100644 index 
00000000000..ee23aba47d8 --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/limit/limit-query-usage.asciidoc @@ -0,0 +1,45 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[limit-query-usage]] +== Limit Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Limit(c => c + .Name("named_query") + .Boost(1.1) + .Limit(100) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new LimitQuery +{ + Name = "named_query", + Boost = 1.1, + Limit = 100 +} +---- + +[source,javascript] +.Example json output +---- +{ + "limit": { + "_name": "named_query", + "boost": 1.1, + "limit": 100 + } +} +---- + diff --git a/docs/asciidoc/query-dsl/compound/not/not-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/not/not-query-usage.asciidoc new file mode 100644 index 00000000000..efa03beffc0 --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/not/not-query-usage.asciidoc @@ -0,0 +1,62 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[not-query-usage]] +== Not Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Not(c => c + .Name("named_query") + .Boost(1.1) + .Filters( + qq => qq.MatchAll(m => m.Name("query1")), + qq => qq.MatchAll(m => m.Name("query2")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new NotQuery() +{ + Name = "named_query", + Boost = 1.1, + Filters = new QueryContainer[] { + new MatchAllQuery() { Name = "query1" }, + new MatchAllQuery() { Name = "query2" }, + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "not": { + "_name": "named_query", + "boost": 1.1, + "filters": [ + { + "match_all": { + "_name": "query1" + } + }, + { + "match_all": { + "_name": "query2" + } + } + ] + } +} +---- + diff 
--git a/docs/asciidoc/query-dsl/compound/or/or-query-usage.asciidoc b/docs/asciidoc/query-dsl/compound/or/or-query-usage.asciidoc new file mode 100644 index 00000000000..202ede1811c --- /dev/null +++ b/docs/asciidoc/query-dsl/compound/or/or-query-usage.asciidoc @@ -0,0 +1,62 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[or-query-usage]] +== Or Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Or(c => c + .Name("named_query") + .Boost(1.1) + .Filters( + qq => qq.MatchAll(m => m.Name("query1")), + qq => qq.MatchAll(m => m.Name("query2")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new OrQuery() +{ + Name = "named_query", + Boost = 1.1, + Filters = new QueryContainer[] { + new MatchAllQuery() { Name = "query1" }, + new MatchAllQuery() { Name = "query2" }, + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "or": { + "_name": "named_query", + "boost": 1.1, + "filters": [ + { + "match_all": { + "_name": "query1" + } + }, + { + "match_all": { + "_name": "query2" + } + } + ] + } +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/bounding-box/geo-bounding-box-query-usage.asciidoc b/docs/asciidoc/query-dsl/geo/bounding-box/geo-bounding-box-query-usage.asciidoc new file mode 100644 index 00000000000..75eb66470bd --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/bounding-box/geo-bounding-box-query-usage.asciidoc @@ -0,0 +1,75 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-bounding-box-query-usage]] +== Geo Bounding Box Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoBoundingBox(g=>g + .Boost(1.1) + .Name("named_query") + .Field(p=>p.Location) + .BoundingBox(b=>b + .TopLeft(34, -34) + .BottomRight(-34, 34) + 
) + .Coerce() + .IgnoreMalformed() + .ValidationMethod(GeoValidationMethod.Strict) + .Type(GeoExecution.Indexed) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoBoundingBoxQuery +{ + Boost = 1.1, + Name = "named_query", + Field = Infer.Field(p => p.Location), + BoundingBox = new Nest.BoundingBox + { + TopLeft = new GeoLocation(34,-34), + BottomRight = new GeoLocation(-34,34), + }, + Type = GeoExecution.Indexed, + Coerce = true, + IgnoreMalformed = true, + ValidationMethod = GeoValidationMethod.Strict +} +---- + +[source,javascript] +.Example json output +---- +{ + "geo_bounding_box": { + "type": "indexed", + "coerce": true, + "ignore_malformed": true, + "validation_method": "strict", + "_name": "named_query", + "boost": 1.1, + "location": { + "top_left": { + "lat": 34.0, + "lon": -34.0 + }, + "bottom_right": { + "lat": -34.0, + "lon": 34.0 + } + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/distance-range/geo-distance-range-query-usage.asciidoc b/docs/asciidoc/query-dsl/geo/distance-range/geo-distance-range-query-usage.asciidoc new file mode 100644 index 00000000000..55f572f3208 --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/distance-range/geo-distance-range-query-usage.asciidoc @@ -0,0 +1,77 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-distance-range-query-usage]] +== Geo Distance Range Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoDistanceRange(g=>g + .Boost(1.1) + .Name("named_query") + .Field(p=>p.Location) + .DistanceType(GeoDistanceType.Arc) + .Coerce() + .GreaterThanOrEqualTo(200, DistanceUnit.Kilometers) + .GreaterThan(200, DistanceUnit.Kilometers) + .IgnoreMalformed() + .Location(new GeoLocation(40, -70)) + .Optimize(GeoOptimizeBBox.Indexed) + .LessThanOrEqualTo(Nest.Distance.Miles(400)) + .LessThan(Nest.Distance.Miles(400)) + 
.ValidationMethod(GeoValidationMethod.Strict) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoDistanceRangeQuery +{ + Boost = 1.1, + Name = "named_query", + Field = Infer.Field(p=>p.Location), + DistanceType = GeoDistanceType.Arc, + Coerce = true, + GreaterThanOrEqualTo = Nest.Distance.Kilometers(200), + IgnoreMalformed = true, + GreaterThan = Nest.Distance.Kilometers(200), + LessThan = Nest.Distance.Miles(400), + Location = new GeoLocation(40, -70), + OptimizeBoundingBox = GeoOptimizeBBox.Indexed, + LessThanOrEqualTo = Nest.Distance.Miles(400), + ValidationMethod = GeoValidationMethod.Strict +} +---- + +[source,javascript] +.Example json output +---- +{ + "geo_distance_range": { + "gt": "200.0km", + "gte": "200.0km", + "lt": "400.0mi", + "lte": "400.0mi", + "distance_type": "arc", + "optimize_bbox": "indexed", + "coerce": true, + "ignore_malformed": true, + "validation_method": "strict", + "_name": "named_query", + "boost": 1.1, + "location": { + "lat": 40.0, + "lon": -70.0 + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/distance/geo-distance-query-usage.asciidoc b/docs/asciidoc/query-dsl/geo/distance/geo-distance-query-usage.asciidoc new file mode 100644 index 00000000000..d4cc505c60d --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/distance/geo-distance-query-usage.asciidoc @@ -0,0 +1,68 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-distance-query-usage]] +== Geo Distance Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoDistance(g=>g + .Boost(1.1) + .Name("named_query") + .Field(p=>p.Location) + .DistanceType(GeoDistanceType.Arc) + .Coerce() + .Location(34, -34) + .Distance("200.0m") + .IgnoreMalformed() + .Optimize(GeoOptimizeBBox.Memory) + .ValidationMethod(GeoValidationMethod.Strict) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] 
+---- +new GeoDistanceQuery +{ + Boost = 1.1, + Name = "named_query", + Field = Infer.Field(p => p.Location), + DistanceType = GeoDistanceType.Arc, + Coerce = true, + Location = new GeoLocation(34,-34), + Distance = "200.0m", + IgnoreMalformed = true, + OptimizeBoundingBox = GeoOptimizeBBox.Memory, + ValidationMethod = GeoValidationMethod.Strict +} +---- + +[source,javascript] +.Example json output +---- +{ + "geo_distance": { + "_name": "named_query", + "boost": 1.1, + "distance": "200.0m", + "optimize_bbox": "memory", + "distance_type": "arc", + "coerce": true, + "ignore_malformed": true, + "validation_method": "strict", + "location": { + "lat": 34.0, + "lon": -34.0 + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/hash-cell/geo-hash-cell-query-usage.asciidoc b/docs/asciidoc/query-dsl/geo/hash-cell/geo-hash-cell-query-usage.asciidoc new file mode 100644 index 00000000000..9db82c4a816 --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/hash-cell/geo-hash-cell-query-usage.asciidoc @@ -0,0 +1,56 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-hash-cell-query-usage]] +== Geo Hash Cell Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoHashCell(c => c + .Name("named_query") + .Boost(1.1) + .Field(p=>p.Location) + .Location(new GeoLocation(13.4080, 52.5186)) + .Neighbors() + .Precision(Nest.Distance.Meters(3)) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoHashCellQuery +{ + Boost = 1.1, + Name = "named_query", + Field = Infer.Field(p=>p.Location), + Location = new GeoLocation(13.4080, 52.5186), + Neighbors = true, + Precision = Nest.Distance.Meters(3) +} +---- + +[source,javascript] +.Example json output +---- +{ + "geohash_cell": { + "_name": "named_query", + "boost": 1.1, + "precision": "3.0m", + "neighbors": true, + "location": { + "lat": 13.408, + "lon": 52.5186 + 
} + } +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/polygon/geo-polygon-query-usage.asciidoc b/docs/asciidoc/query-dsl/geo/polygon/geo-polygon-query-usage.asciidoc new file mode 100644 index 00000000000..15f49d76c2d --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/polygon/geo-polygon-query-usage.asciidoc @@ -0,0 +1,67 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-polygon-query-usage]] +== Geo Polygon Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoPolygon(c => c + .Name("named_query") + .Boost(1.1) + .Field(p=>p.Location) + .IgnoreMalformed() + .Coerce() + .ValidationMethod(GeoValidationMethod.Strict) + .Points( new GeoLocation(45,-45), new GeoLocation(-34,34)) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoPolygonQuery +{ + Boost = 1.1, + Name = "named_query", + ValidationMethod = GeoValidationMethod.Strict, + Coerce = true, + IgnoreMalformed = true, + Points = new [] { new GeoLocation(45,-45), new GeoLocation(-34,34), }, + Field = Field(p=>p.Location) +} +---- + +[source,javascript] +.Example json output +---- +{ + "geo_polygon": { + "_name": "named_query", + "boost": 1.1, + "coerce": true, + "ignore_malformed": true, + "validation_method": "strict", + "location": { + "points": [ + { + "lat": 45.0, + "lon": -45.0 + }, + { + "lat": -34.0, + "lon": 34.0 + } + ] + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/shape/circle/geo-shape-circle-usage.asciidoc b/docs/asciidoc/query-dsl/geo/shape/circle/geo-shape-circle-usage.asciidoc new file mode 100644 index 00000000000..a3185ee4299 --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/shape/circle/geo-shape-circle-usage.asciidoc @@ -0,0 +1,36 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: 
https://www.nuget.org/packages + +[[geo-shape-circle-usage]] +== Geo Shape Circle Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoShapeCircle(c => c + .Name("named_query") + .Boost(1.1) + .Field(p=>p.Location) + .Coordinates(this._coordinates) + .Radius("100m") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoShapeCircleQuery +{ + Name = "named_query", + Boost = 1.1, + Field = Field(p=>p.Location), + Shape = new CircleGeoShape(this._coordinates) { Radius = "100m" } +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/shape/envelope/geo-envelope-usage.asciidoc b/docs/asciidoc/query-dsl/geo/shape/envelope/geo-envelope-usage.asciidoc new file mode 100644 index 00000000000..67a68dca84f --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/shape/envelope/geo-envelope-usage.asciidoc @@ -0,0 +1,35 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-envelope-usage]] +== Geo Envelope Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoShapeEnvelope(c => c + .Name("named_query") + .Boost(1.1) + .Field(p=>p.Location) + .Coordinates(this._coordinates) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoShapeEnvelopeQuery +{ + Name = "named_query", + Boost = 1.1, + Field = Field(p=>p.Location), + Shape = new EnvelopeGeoShape(this._coordinates) +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/shape/indexed-shape/geo-indexed-shape-usage.asciidoc b/docs/asciidoc/query-dsl/geo/shape/indexed-shape/geo-indexed-shape-usage.asciidoc new file mode 100644 index 00000000000..935d4062bc7 --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/shape/indexed-shape/geo-indexed-shape-usage.asciidoc @@ -0,0 +1,63 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: 
https://www.nuget.org/packages + +[[geo-indexed-shape-usage]] +== Geo Indexed Shape Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoIndexedShape(c => c + .Name("named_query") + .Boost(1.1) + .Field(p=>p.Location) + .IndexedShape(p=>p + .Id(2) + .Path(pp=>pp.Location) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoIndexedShapeQuery +{ + Name = "named_query", + Boost = 1.1, + Field = Field(p=>p.Location), + IndexedShape = new FieldLookup + { + Id = 2, + Index = Index(), + Type = Type(), + Path = Field(p=>p.Location) + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "geo_shape": { + "location": { + "_name": "named_query", + "boost": 1.1, + "indexed_shape": { + "id": 2, + "type": "project", + "index": "project", + "path": "location" + } + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/shape/line-string/geo-line-string-usage.asciidoc b/docs/asciidoc/query-dsl/geo/shape/line-string/geo-line-string-usage.asciidoc new file mode 100644 index 00000000000..1d47da60b4a --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/shape/line-string/geo-line-string-usage.asciidoc @@ -0,0 +1,35 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-line-string-usage]] +== Geo Line String Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoShapeLineString(c => c + .Name("named_query") + .Boost(1.1) + .Field(p=>p.Location) + .Coordinates(this._coordinates) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoShapeLineStringQuery +{ + Name = "named_query", + Boost = 1.1, + Field = Field(p=>p.Location), + Shape = new LineStringGeoShape(this._coordinates) +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/shape/multi-line-string/geo-multi-line-string-usage.asciidoc 
b/docs/asciidoc/query-dsl/geo/shape/multi-line-string/geo-multi-line-string-usage.asciidoc new file mode 100644 index 00000000000..d1844b32a90 --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/shape/multi-line-string/geo-multi-line-string-usage.asciidoc @@ -0,0 +1,35 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-multi-line-string-usage]] +== Geo Multi Line String Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoShapeMultiLineString(c => c + .Name("named_query") + .Boost(1.1) + .Field(p=>p.Location) + .Coordinates(this._coordinates) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoShapeMultiLineStringQuery +{ + Name = "named_query", + Boost = 1.1, + Field = Field(p=>p.Location), + Shape = new MultiLineStringGeoShape(this._coordinates) +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/shape/multi-point/geo-multi-point-usage.asciidoc b/docs/asciidoc/query-dsl/geo/shape/multi-point/geo-multi-point-usage.asciidoc new file mode 100644 index 00000000000..2ec90304aed --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/shape/multi-point/geo-multi-point-usage.asciidoc @@ -0,0 +1,35 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-multi-point-usage]] +== Geo Multi Point Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoShapeMultiPoint(c => c + .Name("named_query") + .Boost(1.1) + .Field(p=>p.Location) + .Coordinates(this._coordinates) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoShapeMultiPointQuery +{ + Name = "named_query", + Boost = 1.1, + Field = Field(p=>p.Location), + Shape = new MultiPointGeoShape(this._coordinates) +} +---- + diff --git 
a/docs/asciidoc/query-dsl/geo/shape/point/geo-point-usage.asciidoc b/docs/asciidoc/query-dsl/geo/shape/point/geo-point-usage.asciidoc new file mode 100644 index 00000000000..11c185b017f --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/shape/point/geo-point-usage.asciidoc @@ -0,0 +1,35 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-point-usage]] +== Geo Point Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoShapePoint(c => c + .Name("named_query") + .Boost(1.1) + .Field(p=>p.Location) + .Coordinates(this._coordinates) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoShapePointQuery +{ + Name = "named_query", + Boost = 1.1, + Field = Field(p=>p.Location), + Shape = new PointGeoShape(this._coordinates) +} +---- + diff --git a/docs/asciidoc/query-dsl/geo/shape/polygon/geo-polygon-usage.asciidoc b/docs/asciidoc/query-dsl/geo/shape/polygon/geo-polygon-usage.asciidoc new file mode 100644 index 00000000000..9b0a72c8f31 --- /dev/null +++ b/docs/asciidoc/query-dsl/geo/shape/polygon/geo-polygon-usage.asciidoc @@ -0,0 +1,35 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[geo-polygon-usage]] +== Geo Polygon Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.GeoShapePolygon(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Location) + .Coordinates(this._coordinates) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new GeoShapePolygonQuery +{ + Name = "named_query", + Boost = 1.1, + Field = Field(p => p.Location), + Shape = new PolygonGeoShape(this._coordinates) { } +} +---- + diff --git a/docs/asciidoc/query-dsl/joining/has-child/has-child-query-usage.asciidoc 
b/docs/asciidoc/query-dsl/joining/has-child/has-child-query-usage.asciidoc new file mode 100644 index 00000000000..3e5083af76e --- /dev/null +++ b/docs/asciidoc/query-dsl/joining/has-child/has-child-query-usage.asciidoc @@ -0,0 +1,63 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[has-child-query-usage]] +== Has Child Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.HasChild(c => c + .Name("named_query") + .Boost(1.1) + .InnerHits(i=>i.Explain()) + .MaxChildren(5) + .MinChildren(1) + .ScoreMode(ChildScoreMode.Average) + .Query(qq=>qq.MatchAll()) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new HasChildQuery +{ + Name = "named_query", + Boost = 1.1, + Type = Infer.Type(), + InnerHits = new InnerHits { Explain = true }, + MaxChildren = 5, + MinChildren = 1, + Query = new MatchAllQuery(), + ScoreMode = ChildScoreMode.Average +} +---- + +[source,javascript] +.Example json output +---- +{ + "has_child": { + "_name": "named_query", + "boost": 1.1, + "type": "developer", + "score_mode": "avg", + "min_children": 1, + "max_children": 5, + "query": { + "match_all": {} + }, + "inner_hits": { + "explain": true + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/joining/has-parent/has-parent-query-usage.asciidoc b/docs/asciidoc/query-dsl/joining/has-parent/has-parent-query-usage.asciidoc new file mode 100644 index 00000000000..30488fd07a7 --- /dev/null +++ b/docs/asciidoc/query-dsl/joining/has-parent/has-parent-query-usage.asciidoc @@ -0,0 +1,58 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[has-parent-query-usage]] +== Has Parent Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.HasParent(c => c + .Name("named_query") + .Boost(1.1) + 
.InnerHits(i=>i.Explain()) + .ScoreMode(ParentScoreMode.Score) + .Query(qq=>qq.MatchAll()) + +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new HasParentQuery +{ + Name = "named_query", + Boost = 1.1, + Type = Infer.Type(), + InnerHits = new InnerHits { Explain = true }, + Query = new MatchAllQuery(), + ScoreMode = ParentScoreMode.Score +} +---- + +[source,javascript] +.Example json output +---- +{ + "has_parent": { + "_name": "named_query", + "boost": 1.1, + "type": "developer", + "score_mode": "score", + "query": { + "match_all": {} + }, + "inner_hits": { + "explain": true + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/joining/nested/nested-query-usage.asciidoc b/docs/asciidoc/query-dsl/joining/nested/nested-query-usage.asciidoc new file mode 100644 index 00000000000..860c99a0aa1 --- /dev/null +++ b/docs/asciidoc/query-dsl/joining/nested/nested-query-usage.asciidoc @@ -0,0 +1,75 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[nested-query-usage]] +== Nested Query Usage + +The nested query allows querying nested objects / docs (see {ref_current}/nested.html[nested mapping]). +The query is executed against the nested objects / docs as if they were indexed as separate +docs (they are, internally), resulting in the root parent doc (or parent nested mapping). + +See the Elasticsearch documentation on {ref_current}/query-dsl-nested-query.html[nested query] for more details.
+ +=== Fluent DSL Example + +[source,csharp] +---- +q +.Nested(c => c + .Name("named_query") + .Boost(1.1) + .InnerHits(i=>i.Explain()) + .Path(p=>p.CuratedTags) + .Query(nq => nq + .Terms(t => t + .Field(f => f.CuratedTags.First().Name) + .Terms("lorem", "ipsum") + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new NestedQuery +{ + Name = "named_query", + Boost = 1.1, + InnerHits = new InnerHits { Explain = true }, + Path = Field(p => p.CuratedTags), + Query = new TermsQuery + { + Field = Field(p => p.CuratedTags.First().Name), + Terms = new[] { "lorem", "ipsum" } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "nested": { + "_name": "named_query", + "boost": 1.1, + "query": { + "terms": { + "curatedTags.name": [ + "lorem", + "ipsum" + ] + } + }, + "path": "curatedTags", + "inner_hits": { + "explain": true + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/nest-specific/raw/raw-combine-usage.asciidoc b/docs/asciidoc/query-dsl/nest-specific/raw/raw-combine-usage.asciidoc new file mode 100644 index 00000000000..5250786a5b3 --- /dev/null +++ b/docs/asciidoc/query-dsl/nest-specific/raw/raw-combine-usage.asciidoc @@ -0,0 +1,47 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[raw-combine-usage]] +== Raw Combine Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q.Raw(RawTermQuery) && q.Term("x", "y") +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new RawQuery(RawTermQuery) +&& new TermQuery { Field = "x", Value = "y" } +---- + +[source,javascript] +.Example json output +---- +{ + "bool": { + "must": [ + { + "term": { + "fieldname": "value" + } + }, + { + "term": { + "x": { + "value": "y" + } + } + } + ] + } +} +---- + diff --git a/docs/asciidoc/query-dsl/nest-specific/raw/raw-query-usage.asciidoc 
b/docs/asciidoc/query-dsl/nest-specific/raw/raw-query-usage.asciidoc new file mode 100644 index 00000000000..8a981e5c065 --- /dev/null +++ b/docs/asciidoc/query-dsl/nest-specific/raw/raw-query-usage.asciidoc @@ -0,0 +1,34 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[raw-query-usage]] +== Raw Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Raw(RawTermQuery) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new RawQuery(RawTermQuery) +---- + +[source,javascript] +.Example json output +---- +{ + "term": { + "fieldname": "value" + } +} +---- + diff --git a/docs/asciidoc/query-dsl/span/container/span-containing-query-usage.asciidoc b/docs/asciidoc/query-dsl/span/container/span-containing-query-usage.asciidoc new file mode 100644 index 00000000000..7b7118358e7 --- /dev/null +++ b/docs/asciidoc/query-dsl/span/container/span-containing-query-usage.asciidoc @@ -0,0 +1,64 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[span-containing-query-usage]] +== Span Containing Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.SpanContaining(sn => sn + .Name("named_query") + .Boost(1.1) + .Little(i=>i + .SpanTerm(st=>st.Field("field1").Value("hoya")) + ) + .Big(e=>e + .SpanTerm(st=>st.Field("field1").Value("hoya2")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SpanContainingQuery +{ + Name = "named_query", + Boost = 1.1, + Little = new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field1", Value = "hoya"} }, + Big = new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field1", Value = "hoya2"} }, +} +---- + +[source,javascript] +.Example json output +---- +{ + "span_containing": { + "_name": 
"named_query", + "boost": 1.1, + "little": { + "span_term": { + "field1": { + "value": "hoya" + } + } + }, + "big": { + "span_term": { + "field1": { + "value": "hoya2" + } + } + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/span/first/span-first-query-usage.asciidoc b/docs/asciidoc/query-dsl/span/first/span-first-query-usage.asciidoc new file mode 100644 index 00000000000..73b2725f3b7 --- /dev/null +++ b/docs/asciidoc/query-dsl/span/first/span-first-query-usage.asciidoc @@ -0,0 +1,59 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[span-first-query-usage]] +== Span First Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.SpanFirst(c => c + .Name("named_query") + .Boost(1.1) + .Match(sq=>sq + .SpanTerm(st=>st.Field(p=>p.Name).Value("value")) + ) + .End(3) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SpanFirstQuery +{ + Name = "named_query", + Boost = 1.1, + End = 3, + Match = new SpanQuery + { + SpanTerm = new SpanTermQuery { Field = "name", Value = "value" } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "span_first": { + "_name": "named_query", + "boost": 1.1, + "match": { + "span_term": { + "name": { + "value": "value" + } + } + }, + "end": 3 + } +} +---- + diff --git a/docs/asciidoc/query-dsl/span/multi-term/span-multi-term-query-usage.asciidoc b/docs/asciidoc/query-dsl/span/multi-term/span-multi-term-query-usage.asciidoc new file mode 100644 index 00000000000..8e0db66aebe --- /dev/null +++ b/docs/asciidoc/query-dsl/span/multi-term/span-multi-term-query-usage.asciidoc @@ -0,0 +1,53 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[span-multi-term-query-usage]] +== Span Multi Term Query Usage + +=== Fluent DSL 
Example + +[source,csharp] +---- +q +.SpanMultiTerm(c => c + .Name("named_query") + .Boost(1.1) + .Match(sq=>sq + .Prefix(pr=>pr.Field(p=>p.Name).Value("pre-*")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SpanMultiTermQuery +{ + Name = "named_query", + Boost = 1.1, + Match = new PrefixQuery { Field = "name", Value = "pre-*" } +} +---- + +[source,javascript] +.Example json output +---- +{ + "span_multi": { + "_name": "named_query", + "boost": 1.1, + "match": { + "prefix": { + "name": { + "value": "pre-*" + } + } + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/span/near/span-near-query-usage.asciidoc b/docs/asciidoc/query-dsl/span/near/span-near-query-usage.asciidoc new file mode 100644 index 00000000000..aaac1e9a051 --- /dev/null +++ b/docs/asciidoc/query-dsl/span/near/span-near-query-usage.asciidoc @@ -0,0 +1,85 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[span-near-query-usage]] +== Span Near Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.SpanNear(sn => sn + .Name("named_query") + .Boost(1.1) + .Clauses( + c=>c.SpanTerm(st=>st.Field("field").Value("value1")), + c=>c.SpanTerm(st=>st.Field("field").Value("value2")), + c=>c.SpanTerm(st=>st.Field("field").Value("value3")) + ) + .Slop(12) + .InOrder(false) + .CollectPayloads(false) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SpanNearQuery +{ + Name = "named_query", + Boost = 1.1, + Clauses = new List + { + new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field", Value = "value1" } }, + new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field", Value = "value2" } }, + new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field", Value = "value3" } } + }, + Slop = 12, + InOrder = false, + CollectPayloads = false +} +---- + +[source,javascript] +.Example json output 
+---- +{ + "span_near": { + "clauses": [ + { + "span_term": { + "field": { + "value": "value1" + } + } + }, + { + "span_term": { + "field": { + "value": "value2" + } + } + }, + { + "span_term": { + "field": { + "value": "value3" + } + } + } + ], + "slop": 12, + "in_order": false, + "collect_payloads": false, + "_name": "named_query", + "boost": 1.1 + } +} +---- + diff --git a/docs/asciidoc/query-dsl/span/not/span-not-query-usage.asciidoc b/docs/asciidoc/query-dsl/span/not/span-not-query-usage.asciidoc new file mode 100644 index 00000000000..6ef96d2e2a0 --- /dev/null +++ b/docs/asciidoc/query-dsl/span/not/span-not-query-usage.asciidoc @@ -0,0 +1,85 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[span-not-query-usage]] +== Span Not Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.SpanNot(sn => sn + .Name("named_query") + .Boost(1.1) + .Dist(12) + .Post(13) + .Pre(14) + .Include(i => i + .SpanTerm(st => st.Field("field1").Value("hoya")) + ) + .Exclude(e => e + .SpanTerm(st => st.Field("field1").Value("hoya2")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SpanNotQuery +{ + Name = "named_query", + Boost = 1.1, + Dist = 12, + Post = 13, + Pre = 14, + Include = new SpanQuery + { + SpanTerm = new SpanTermQuery + { + Field = "field1", Value = "hoya" + } + }, + Exclude = new SpanQuery + { + SpanTerm = new SpanTermQuery + { + Field = "field1", Value = "hoya2" + } + }, +} +---- + +[source,javascript] +.Example json output +---- +{ + "span_not": { + "_name": "named_query", + "boost": 1.1, + "include": { + "span_term": { + "field1": { + "value": "hoya" + } + } + }, + "exclude": { + "span_term": { + "field1": { + "value": "hoya2" + } + } + }, + "pre": 14, + "post": 13, + "dist": 12 + } +} +---- + diff --git a/docs/asciidoc/query-dsl/span/or/span-or-query-usage.asciidoc 
b/docs/asciidoc/query-dsl/span/or/span-or-query-usage.asciidoc new file mode 100644 index 00000000000..83640752cd1 --- /dev/null +++ b/docs/asciidoc/query-dsl/span/or/span-or-query-usage.asciidoc @@ -0,0 +1,76 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[span-or-query-usage]] +== Span Or Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.SpanOr(sn => sn + .Name("named_query") + .Boost(1.1) + .Clauses( + c => c.SpanTerm(st => st.Field("field").Value("value1")), + c => c.SpanTerm(st => st.Field("field").Value("value2")), + c => c.SpanTerm(st => st.Field("field").Value("value3")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SpanOrQuery +{ + Name = "named_query", + Boost = 1.1, + Clauses = new List + { + new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field", Value = "value1" } }, + new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field", Value = "value2" } }, + new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field", Value = "value3" } } + }, +} +---- + +[source,javascript] +.Example json output +---- +{ + "span_or": { + "_name": "named_query", + "boost": 1.1, + "clauses": [ + { + "span_term": { + "field": { + "value": "value1" + } + } + }, + { + "span_term": { + "field": { + "value": "value2" + } + } + }, + { + "span_term": { + "field": { + "value": "value3" + } + } + } + ] + } +} +---- + diff --git a/docs/asciidoc/query-dsl/span/term/span-term-query-usage.asciidoc b/docs/asciidoc/query-dsl/span/term/span-term-query-usage.asciidoc new file mode 100644 index 00000000000..3465be21e88 --- /dev/null +++ b/docs/asciidoc/query-dsl/span/term/span-term-query-usage.asciidoc @@ -0,0 +1,49 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: 
https://www.nuget.org/packages + +[[span-term-query-usage]] +== Span Term Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.SpanTerm(c => c + .Name("named_query") + .Boost(1.1) + .Field("user") + .Value("kimchy") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SpanTermQuery +{ + Name = "named_query", + Boost = 1.1, + Value = "kimchy", + Field = "user" +} +---- + +[source,javascript] +.Example json output +---- +{ + "span_term": { + "user": { + "_name": "named_query", + "boost": 1.1, + "value": "kimchy" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/span/within/span-within-query-usage.asciidoc b/docs/asciidoc/query-dsl/span/within/span-within-query-usage.asciidoc new file mode 100644 index 00000000000..2aaddef7c6a --- /dev/null +++ b/docs/asciidoc/query-dsl/span/within/span-within-query-usage.asciidoc @@ -0,0 +1,64 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[span-within-query-usage]] +== Span Within Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.SpanWithin(sn => sn + .Name("named_query") + .Boost(1.1) + .Little(i=>i + .SpanTerm(st=>st.Field("field1").Value("hoya")) + ) + .Big(e=>e + .SpanTerm(st=>st.Field("field1").Value("hoya2")) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SpanWithinQuery +{ + Name = "named_query", + Boost = 1.1, + Little = new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field1", Value = "hoya"} }, + Big = new SpanQuery { SpanTerm = new SpanTermQuery { Field = "field1", Value = "hoya2"} }, +} +---- + +[source,javascript] +.Example json output +---- +{ + "span_within": { + "_name": "named_query", + "boost": 1.1, + "little": { + "span_term": { + "field1": { + "value": "hoya" + } + } + }, + "big": { + "span_term": { + "field1": { + "value": "hoya2" + } + } + } + } +} +---- + diff --git 
a/docs/asciidoc/query-dsl/specialized/more-like-this/more-like-this-query-usage.asciidoc b/docs/asciidoc/query-dsl/specialized/more-like-this/more-like-this-query-usage.asciidoc new file mode 100644 index 00000000000..bbb1ae7e330 --- /dev/null +++ b/docs/asciidoc/query-dsl/specialized/more-like-this/more-like-this-query-usage.asciidoc @@ -0,0 +1,110 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[more-like-this-query-usage]] +== More Like This Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.MoreLikeThis(sn => sn + .Name("named_query") + .Boost(1.1) + .Like(l=>l + .Document(d=>d .Id(Project.Instance.Name)) + .Text("some long text") + ) + .Analyzer("some_analyzer") + .BoostTerms(1.1) + .Include() + .MaxDocumentFrequency(12) + .MaxQueryTerms(12) + .MaxWordLength(300) + .MinDocumentFrequency(1) + .MinTermFrequency(1) + .MinWordLength(10) + .StopWords("and", "the") + .MinimumShouldMatch(1) + .Fields(f=>f.Field(p=>p.Name)) + .Unlike(l=>l + .Text("not like this text") + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new MoreLikeThisQuery +{ + Name = "named_query", + Boost = 1.1, + Fields = Fields(p=>p.Name), + Like = new List + { + new LikeDocument(Project.Instance.Name), + "some long text" + }, + Analyzer = "some_analyzer", + BoostTerms = 1.1, + Include = true, + MaxDocumentFrequency = 12, + MaxQueryTerms = 12, + MaxWordLength = 300, + MinDocumentFrequency = 1, + MinTermFrequency = 1, + MinWordLength = 10, + MinimumShouldMatch = 1, + StopWords = new [] { "and", "the"}, + Unlike = new List + { + "not like this text" + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "mlt": { + "fields": [ + "name" + ], + "minimum_should_match": 1, + "stop_words": [ + "and", + "the" + ], + "min_term_freq": 1, + "max_query_terms": 12, + "min_doc_freq": 1, + "max_doc_freq": 12, + 
"min_word_len": 10, + "max_word_len": 300, + "boost_terms": 1.1, + "analyzer": "some_analyzer", + "include": true, + "like": [ + { + "_index": "project", + "_type": "project", + "_id": "Durgan LLC" + }, + "some long text" + ], + "unlike": [ + "not like this text" + ], + "_name": "named_query", + "boost": 1.1 + } +} +---- + diff --git a/docs/asciidoc/query-dsl/specialized/script/script-query-usage.asciidoc b/docs/asciidoc/query-dsl/specialized/script/script-query-usage.asciidoc new file mode 100644 index 00000000000..ae18b375a97 --- /dev/null +++ b/docs/asciidoc/query-dsl/specialized/script/script-query-usage.asciidoc @@ -0,0 +1,53 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[script-query-usage]] +== Script Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Script(sn => sn + .Name("named_query") + .Boost(1.1) + .Inline(_templateString) + .Params(p=>p.Add("param1", 1)) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new ScriptQuery +{ + Name = "named_query", + Boost = 1.1, + Inline = _templateString, + Params = new Dictionary + { + { "param1", 1 } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "script": { + "_name": "named_query", + "boost": 1.1, + "inline": "doc['num1'].value > param1", + "params": { + "param1": 1 + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/specialized/template/template-query-usage.asciidoc b/docs/asciidoc/query-dsl/specialized/template/template-query-usage.asciidoc new file mode 100644 index 00000000000..13361285b06 --- /dev/null +++ b/docs/asciidoc/query-dsl/specialized/template/template-query-usage.asciidoc @@ -0,0 +1,53 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[template-query-usage]] +== 
Template Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Template(sn => sn + .Name("named_query") + .Boost(1.1) + .Inline(_templateString) + .Params(p=>p.Add("query_string", "all about search")) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new TemplateQuery +{ + Name = "named_query", + Boost = 1.1, + Inline = _templateString, + Params = new Dictionary + { + { "query_string", "all about search" } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "template": { + "_name": "named_query", + "boost": 1.1, + "inline": "{ \"match\": { \"text\": \"{{query_string}}\" } }", + "params": { + "query_string": "all about search" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/exists/exists-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/exists/exists-query-usage.asciidoc new file mode 100644 index 00000000000..974d4b6c7ac --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/exists/exists-query-usage.asciidoc @@ -0,0 +1,45 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[exists-query-usage]] +== Exists Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Exists(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new ExistsQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", +} +---- + +[source,javascript] +.Example json output +---- +{ + "exists": { + "_name": "named_query", + "boost": 1.1, + "field": "description" + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-date-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-date-query-usage.asciidoc new file mode 100644 index 00000000000..bee0aeb2619 --- /dev/null +++ 
b/docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-date-query-usage.asciidoc @@ -0,0 +1,64 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[fuzzy-date-query-usage]] +== Fuzzy Date Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.FuzzyDate(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .Fuzziness(TimeSpan.FromDays(2)) + .Value(Project.Instance.StartedOn) + .MaxExpansions(100) + .PrefixLength(3) + .Rewrite(RewriteMultiTerm.ConstantScore) + .Transpositions() +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new FuzzyDateQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Fuzziness = TimeSpan.FromDays(2), + Value = Project.Instance.StartedOn, + MaxExpansions = 100, + PrefixLength = 3, + Rewrite = RewriteMultiTerm.ConstantScore, + Transpositions = true +} +---- + +[source,javascript] +.Example json output +---- +{ + "fuzzy": { + "description": { + "_name": "named_query", + "boost": 1.1, + "fuzziness": "2d", + "max_expansions": 100, + "prefix_length": 3, + "rewrite": "constant_score", + "transpositions": true, + "value": "2015-01-01T00:00:00" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-numeric-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-numeric-query-usage.asciidoc new file mode 100644 index 00000000000..bc8810eafe2 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-numeric-query-usage.asciidoc @@ -0,0 +1,64 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[fuzzy-numeric-query-usage]] +== Fuzzy Numeric Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.FuzzyNumeric(c => c + .Name("named_query") + .Boost(1.1) + .Field(p 
=> p.Description) + .Fuzziness(2) + .Value(12) + .MaxExpansions(100) + .PrefixLength(3) + .Rewrite(RewriteMultiTerm.ConstantScore) + .Transpositions() +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new FuzzyNumericQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Fuzziness = 2, + Value = 12, + MaxExpansions = 100, + PrefixLength = 3, + Rewrite = RewriteMultiTerm.ConstantScore, + Transpositions = true +} +---- + +[source,javascript] +.Example json output +---- +{ + "fuzzy": { + "description": { + "_name": "named_query", + "boost": 1.1, + "fuzziness": 2.0, + "max_expansions": 100, + "prefix_length": 3, + "rewrite": "constant_score", + "transpositions": true, + "value": 12.0 + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-query-usage.asciidoc new file mode 100644 index 00000000000..1a0d703c63d --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/fuzzy/fuzzy-query-usage.asciidoc @@ -0,0 +1,64 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[fuzzy-query-usage]] +== Fuzzy Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Fuzzy(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .Fuzziness(Fuzziness.Auto) + .Value("ki") + .MaxExpansions(100) + .PrefixLength(3) + .Rewrite(RewriteMultiTerm.ConstantScore) + .Transpositions() +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new FuzzyQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Fuzziness = Fuzziness.Auto, + Value = "ki", + MaxExpansions = 100, + PrefixLength = 3, + Rewrite = RewriteMultiTerm.ConstantScore, + Transpositions = true +} +---- + +[source,javascript] +.Example json output +---- +{ + "fuzzy": { + "description": { + "_name": 
"named_query", + "boost": 1.1, + "fuzziness": "AUTO", + "max_expansions": 100, + "prefix_length": 3, + "rewrite": "constant_score", + "transpositions": true, + "value": "ki" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/ids/ids-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/ids/ids-query-usage.asciidoc new file mode 100644 index 00000000000..19cd0489925 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/ids/ids-query-usage.asciidoc @@ -0,0 +1,56 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[ids-query-usage]] +== Ids Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Ids(c => c + .Name("named_query") + .Boost(1.1) + .Values(1, 2, 3, 4) + .Types(typeof(Project), typeof(Developer)) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new IdsQuery +{ + Name = "named_query", + Boost = 1.1, + Values = new List { 1, 2,3,4 }, + Types = Type().And() +} +---- + +[source,javascript] +.Example json output +---- +{ + "ids": { + "_name": "named_query", + "boost": 1.1, + "types": [ + "project", + "developer" + ], + "values": [ + 1, + 2, + 3, + 4 + ] + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/missing/missing-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/missing/missing-query-usage.asciidoc new file mode 100644 index 00000000000..01a6f3ef835 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/missing/missing-query-usage.asciidoc @@ -0,0 +1,51 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[missing-query-usage]] +== Missing Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Missing(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .NullValue() + .Existence() 
+) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new MissingQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + NullValue = true, + Existence = true +} +---- + +[source,javascript] +.Example json output +---- +{ + "missing": { + "_name": "named_query", + "boost": 1.1, + "existence": true, + "field": "description", + "null_value": true + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/prefix/prefix-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/prefix/prefix-query-usage.asciidoc new file mode 100644 index 00000000000..719986ba859 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/prefix/prefix-query-usage.asciidoc @@ -0,0 +1,52 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[prefix-query-usage]] +== Prefix Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Prefix(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .Value("proj") + .Rewrite(RewriteMultiTerm.TopTermsBoostN) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new PrefixQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Value = "proj", + Rewrite = RewriteMultiTerm.TopTermsBoostN +} +---- + +[source,javascript] +.Example json output +---- +{ + "prefix": { + "description": { + "_name": "named_query", + "boost": 1.1, + "rewrite": "top_terms_boost_N", + "value": "proj" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/range/date-range-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/range/date-range-query-usage.asciidoc new file mode 100644 index 00000000000..eb60fdb35c6 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/range/date-range-query-usage.asciidoc @@ -0,0 +1,64 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: 
https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[date-range-query-usage]] +== Date Range Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.DateRange(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .GreaterThan(FixedDate) + .GreaterThanOrEquals(DateMath.Anchored(FixedDate).RoundTo(TimeUnit.Month)) + .LessThan("01/01/2012") + .LessThanOrEquals(DateMath.Now) + .Format("dd/MM/yyyy||yyyy") + .TimeZone("+01:00") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new DateRangeQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + GreaterThan = FixedDate, + GreaterThanOrEqualTo = DateMath.Anchored(FixedDate).RoundTo(TimeUnit.Month), + LessThan = "01/01/2012", + LessThanOrEqualTo = DateMath.Now, + TimeZone = "+01:00", + Format = "dd/MM/yyyy||yyyy" +} +---- + +[source,javascript] +.Example json output +---- +{ + "range": { + "description": { + "_name": "named_query", + "boost": 1.1, + "format": "dd/MM/yyyy||yyyy", + "gt": "2015-06-06T12:01:02.123", + "gte": "2015-06-06T12:01:02.123||/M", + "lt": "01/01/2012", + "lte": "now", + "time_zone": "+01:00" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/range/numeric-range-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/range/numeric-range-query-usage.asciidoc new file mode 100644 index 00000000000..f4c4e64f443 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/range/numeric-range-query-usage.asciidoc @@ -0,0 +1,58 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[numeric-range-query-usage]] +== Numeric Range Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Range(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .GreaterThan(1.0) + .GreaterThanOrEquals(1.1) + .LessThan(2.1) + 
.LessThanOrEquals(2.0) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new NumericRangeQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + GreaterThan = 1.0, + GreaterThanOrEqualTo = 1.1, + LessThan = 2.1, + LessThanOrEqualTo = 2.0 +} +---- + +[source,javascript] +.Example json output +---- +{ + "range": { + "description": { + "_name": "named_query", + "boost": 1.1, + "gt": 1.0, + "gte": 1.1, + "lt": 2.1, + "lte": 2.0 + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/range/term-range-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/range/term-range-query-usage.asciidoc new file mode 100644 index 00000000000..ee93de92f42 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/range/term-range-query-usage.asciidoc @@ -0,0 +1,58 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[term-range-query-usage]] +== Term Range Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.TermRange(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .GreaterThan("foo") + .GreaterThanOrEquals("foof") + .LessThan("bar") + .LessThanOrEquals("barb") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new TermRangeQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + GreaterThan = "foo", + GreaterThanOrEqualTo = "foof", + LessThan = "bar", + LessThanOrEqualTo = "barb" +} +---- + +[source,javascript] +.Example json output +---- +{ + "range": { + "description": { + "_name": "named_query", + "boost": 1.1, + "gt": "foo", + "gte": "foof", + "lt": "bar", + "lte": "barb" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/regexp/regexp-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/regexp/regexp-query-usage.asciidoc new file mode 100644 index 00000000000..5d99b771e17 --- /dev/null +++ 
b/docs/asciidoc/query-dsl/term-level/regexp/regexp-query-usage.asciidoc @@ -0,0 +1,55 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[regexp-query-usage]] +== Regexp Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Regexp(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .Value("s.*y") + .Flags("INTERSECTION|COMPLEMENT|EMPTY") + .MaximumDeterminizedStates(20000) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new RegexpQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Value = "s.*y", + Flags = "INTERSECTION|COMPLEMENT|EMPTY", + MaximumDeterminizedStates = 20000 +} +---- + +[source,javascript] +.Example json output +---- +{ + "regexp": { + "description": { + "_name": "named_query", + "boost": 1.1, + "flags": "INTERSECTION|COMPLEMENT|EMPTY", + "max_determinized_states": 20000, + "value": "s.*y" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/term/term-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/term/term-query-usage.asciidoc new file mode 100644 index 00000000000..56bfff4117c --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/term/term-query-usage.asciidoc @@ -0,0 +1,49 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[term-query-usage]] +== Term Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Term(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .Value("project description") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new TermQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Value = "project description" +} +---- + +[source,javascript] +.Example json output +---- 
+{ + "term": { + "description": { + "_name": "named_query", + "boost": 1.1, + "value": "project description" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/terms/terms-list-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/terms/terms-list-query-usage.asciidoc new file mode 100644 index 00000000000..41549d983dc --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/terms/terms-list-query-usage.asciidoc @@ -0,0 +1,148 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[terms-list-query-usage]] +== Terms List Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Terms(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .DisableCoord() + .MinimumShouldMatch(MinimumShouldMatch.Fixed(2)) + .Terms(new List { "term1", "term2" }) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new TermsQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Terms = new List { "term1", "term2" }, + DisableCoord = true, + MinimumShouldMatch = 2 +} +---- + +[source,javascript] +.Example json output +---- +{ + "terms": { + "_name": "named_query", + "boost": 1.1, + "description": [ + "term1", + "term2" + ], + "disable_coord": true, + "minimum_should_match": 2 + } +} +---- + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Terms(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .DisableCoord() + .Terms(_terms) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new TermsQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Terms = _terms, + DisableCoord = true, +} +---- + +[source,javascript] +.Example json output +---- +{ + "terms": { + "_name": "named_query", + "boost": 1.1, + "description": [ + [ + "term1", + "term2" + ] + ], + "disable_coord": true + } +} +---- + +=== Fluent 
DSL Example + +[source,csharp] +---- +q +.Terms(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.NumberOfCommits) + .DisableCoord() + .Terms(_terms) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new TermsQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "numberOfCommits", + Terms = _terms, + DisableCoord = true, +} +---- + +[source,javascript] +.Example json output +---- +{ + "terms": { + "_name": "named_query", + "boost": 1.1, + "numberOfCommits": [ + [ + "term1", + "term2" + ] + ], + "disable_coord": true + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/terms/terms-lookup-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/terms/terms-lookup-query-usage.asciidoc new file mode 100644 index 00000000000..95f1f7ad444 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/terms/terms-lookup-query-usage.asciidoc @@ -0,0 +1,58 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[terms-lookup-query-usage]] +== Terms Lookup Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Terms(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .TermsLookup(e=>e.Path(p=>p.LastName).Id(12)) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new TermsQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + TermsLookup = new FieldLookup + { + Id = 12, + Index = Index(), + Type = Type(), + Path = Field(p=>p.LastName) + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "terms": { + "_name": "named_query", + "boost": 1.1, + "description": { + "id": 12, + "index": "devs", + "path": "lastName", + "type": "developer" + } + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/terms/terms-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/terms/terms-query-usage.asciidoc new file mode 
100644 index 00000000000..84e2dcd2185 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/terms/terms-query-usage.asciidoc @@ -0,0 +1,79 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[terms-query-usage]] +== Terms Query Usage + +Filters documents that have fields that match any of the provided terms (not analyzed). + +Be sure to read the Elasticsearch documentation on {ref_current}/query-dsl-terms-query.html[Terms query] for more information. + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Terms(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .DisableCoord() + .MinimumShouldMatch(MinimumShouldMatch.Fixed(2)) + .Terms("term1", "term2") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new TermsQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Terms = ExpectedTerms, + DisableCoord = true, + MinimumShouldMatch = 2 +} +---- + +[source,javascript] +.Example json output +---- +{ + "terms": { + "_name": "named_query", + "boost": 1.1, + "description": [ + "term1", + "term2" + ], + "disable_coord": true, + "minimum_should_match": 2 + } +} +---- + +[[single-term-terms-query]] +[float] +== Single term Terms Query + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Terms(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .DisableCoord() + .MinimumShouldMatch(MinimumShouldMatch.Fixed(2)) + .Terms("term1") +) +---- + diff --git a/docs/asciidoc/query-dsl/term-level/type/type-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/type/type-query-usage.asciidoc new file mode 100644 index 00000000000..2bff4a45c6f --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/type/type-query-usage.asciidoc @@ -0,0 +1,45 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: 
https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[type-query-usage]] +== Type Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Type(c => c + .Name("named_query") + .Boost(1.1) + .Value() +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new TypeQuery +{ + Name = "named_query", + Boost = 1.1, + Value = Type() +} +---- + +[source,javascript] +.Example json output +---- +{ + "type": { + "_name": "named_query", + "boost": 1.1, + "value": "developer" + } +} +---- + diff --git a/docs/asciidoc/query-dsl/term-level/wildcard/wildcard-query-usage.asciidoc b/docs/asciidoc/query-dsl/term-level/wildcard/wildcard-query-usage.asciidoc new file mode 100644 index 00000000000..8e23c6dcab2 --- /dev/null +++ b/docs/asciidoc/query-dsl/term-level/wildcard/wildcard-query-usage.asciidoc @@ -0,0 +1,52 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[wildcard-query-usage]] +== Wildcard Query Usage + +=== Fluent DSL Example + +[source,csharp] +---- +q +.Wildcard(c => c + .Name("named_query") + .Boost(1.1) + .Field(p => p.Description) + .Value("p*oj") + .Rewrite(RewriteMultiTerm.TopTermsBoostN) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new WildcardQuery +{ + Name = "named_query", + Boost = 1.1, + Field = "description", + Value = "p*oj", + Rewrite = RewriteMultiTerm.TopTermsBoostN +} +---- + +[source,javascript] +.Example json output +---- +{ + "wildcard": { + "description": { + "_name": "named_query", + "boost": 1.1, + "rewrite": "top_terms_boost_N", + "value": "p*oj" + } + } +} +---- + diff --git a/docs/asciidoc/search-usage.asciidoc b/docs/asciidoc/search-usage.asciidoc new file mode 100644 index 00000000000..f55b9085201 --- /dev/null +++ b/docs/asciidoc/search-usage.asciidoc @@ -0,0 +1,32 @@ +:includes-from-dirs: search/request + 
+include::../../docs/asciidoc/search/request/explain-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/fielddata-fields-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/fields-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/from-and-size-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/highlighting-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/index-boost-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/inner-hits-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/min-score-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/post-filter-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/profile-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/query-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/script-fields-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/sort-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/source-filtering-usage.asciidoc[] + +include::../../docs/asciidoc/search/request/suggest-usage.asciidoc[] + diff --git a/docs/asciidoc/search.asciidoc b/docs/asciidoc/search.asciidoc new file mode 100644 index 00000000000..db9b5b36bd2 --- /dev/null +++ b/docs/asciidoc/search.asciidoc @@ -0,0 +1,41 @@ +[[search]] += Search + +[partintro] +-- +NEST exposes all of the search request parameters available in Elasticsearch + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +* <> + +-- + +include::search-usage.asciidoc[] + diff --git a/docs/asciidoc/search/request/explain-usage.asciidoc b/docs/asciidoc/search/request/explain-usage.asciidoc new file mode 100644 index 00000000000..3103596166f --- /dev/null +++ b/docs/asciidoc/search/request/explain-usage.asciidoc @@ -0,0 +1,36 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + 
+:nuget: https://www.nuget.org/packages + +[[explain-usage]] +== Explain Usage + +Enables explanation for each hit on how its score was computed. + +See the Elasticsearch documentation on {ref_current}/search-explain.html[Explain] for more detail. + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Explain() +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest { Explain = true } +---- + +[source,javascript] +.Example json output +---- +{ + "explain": true +} +---- + diff --git a/docs/asciidoc/search/request/fielddata-fields-usage.asciidoc b/docs/asciidoc/search/request/fielddata-fields-usage.asciidoc new file mode 100644 index 00000000000..2071377d6a5 --- /dev/null +++ b/docs/asciidoc/search/request/fielddata-fields-usage.asciidoc @@ -0,0 +1,47 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[fielddata-fields-usage]] +== Fielddata Fields Usage + +Allows to return the field data representation of a field for each hit. + +See the Elasticsearch documentation on {ref_current}/search-request-fielddata-fields.html[Field Data Fields] for more detail. 
+ +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.FielddataFields(fs => fs + .Field(p => p.Name) + .Field(p => p.LeadDeveloper) + .Field(p => p.StartedOn) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + FielddataFields = new string [] { "name", "leadDeveloper", "startedOn" } +} +---- + +[source,javascript] +.Example json output +---- +{ + "fielddata_fields": [ + "name", + "leadDeveloper", + "startedOn" + ] +} +---- + diff --git a/docs/asciidoc/search/request/fields-usage.asciidoc b/docs/asciidoc/search/request/fields-usage.asciidoc new file mode 100644 index 00000000000..8bf8625338e --- /dev/null +++ b/docs/asciidoc/search/request/fields-usage.asciidoc @@ -0,0 +1,49 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[fields-usage]] +== Fields Usage + +Allows to selectively load specific stored fields for each document represented by a search hit. + +WARNING: The `fields` parameter is about fields that are explicitly marked as stored in the mapping, +which is off by default and generally not recommended. +Use <> instead to select subsets of the original source document to be returned. + +See the Elasticsearch documentation on {ref_current}/search-request-fields.html[Fields] for more detail. 
+ +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Fields(fs => fs + .Field(p => p.Name) + .Field(p => p.StartedOn) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Fields = Fields(p => p.Name, p => p.StartedOn) +} +---- + +[source,javascript] +.Example json output +---- +{ + "fields": [ + "name", + "startedOn" + ] +} +---- + diff --git a/docs/asciidoc/search/request/from-and-size-usage.asciidoc b/docs/asciidoc/search/request/from-and-size-usage.asciidoc new file mode 100644 index 00000000000..d3503a758ca --- /dev/null +++ b/docs/asciidoc/search/request/from-and-size-usage.asciidoc @@ -0,0 +1,46 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[from-and-size-usage]] +== From And Size Usage + +Pagination of results can be done by using the `from` and `size` parameters. + +`from` parameter:: +defines the offset from the first result you want to fetch. + +`size` parameter:: +allows you to configure the maximum number of hits to be returned.
+ +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + From = 10, + Size = 12 +} +---- + +[source,javascript] +.Example json output +---- +{ + "from": 10, + "size": 12 +} +---- + +=== Fluent DSL Example + +[source,csharp] +---- +s => s + .From(10) + .Size(12) +---- + diff --git a/docs/asciidoc/search/request/highlighting-usage.asciidoc b/docs/asciidoc/search/request/highlighting-usage.asciidoc new file mode 100644 index 00000000000..f577f66db5d --- /dev/null +++ b/docs/asciidoc/search/request/highlighting-usage.asciidoc @@ -0,0 +1,226 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[highlighting-usage]] +== Highlighting Usage + +Allows to highlight search results on one or more fields. +The implementation uses either the lucene `highlighter`, `fast-vector-highlighter` or `postings-highlighter`. + +See the Elasticsearch documentation on {ref_current}/search-request-highlighting.html[highlighting] for more detail. 
+ +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Query(q => q + .Match(m => m + .Field(f => f.Name.Suffix("standard")) + .Query("Upton Sons Shield Rice Rowe Roberts") + ) +) +.Highlight(h => h + .PreTags("") + .PostTags("") + .Fields( + fs => fs + .Field(p => p.Name.Suffix("standard")) + .Type(HighlighterType.Plain) + .ForceSource() + .FragmentSize(150) + .NumberOfFragments(3) + .NoMatchSize(150), + fs => fs + .Field(p => p.LeadDeveloper.FirstName) + .Type(HighlighterType.Fvh) + .PreTags("") + .PostTags("") + .HighlightQuery(q => q + .Match(m => m + .Field(p => p.LeadDeveloper.FirstName) + .Query("Kurt Edgardo Naomi Dariana Justice Felton") + ) + ), + fs => fs + .Field(p => p.State.Suffix("offsets")) + .Type(HighlighterType.Postings) + .PreTags("") + .PostTags("") + .HighlightQuery(q => q + .Terms(t => t + .Field(f => f.State.Suffix("offsets")) + .Terms( + StateOfBeing.Stable.ToString().ToLowerInvariant(), + StateOfBeing.BellyUp.ToString().ToLowerInvariant() + ) + ) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Query = new MatchQuery + { + Query = "Upton Sons Shield Rice Rowe Roberts", + Field = "name.standard" + }, + Highlight = new Highlight + { + PreTags = new[] { "" }, + PostTags = new[] { "" }, + Fields = new Dictionary + { + { "name.standard", new HighlightField + { + Type = HighlighterType.Plain, + ForceSource = true, + FragmentSize = 150, + NumberOfFragments = 3, + NoMatchSize = 150 + } + }, + { "leadDeveloper.firstName", new HighlightField + { + Type = HighlighterType.Fvh, + PreTags = new[] { ""}, + PostTags = new[] { ""}, + HighlightQuery = new MatchQuery + { + Field = "leadDeveloper.firstName", + Query = "Kurt Edgardo Naomi Dariana Justice Felton" + } + } + }, + { "state.offsets", new HighlightField + { + Type = HighlighterType.Postings, + PreTags = new[] { ""}, + PostTags = new[] { ""}, + HighlightQuery = new TermsQuery + { + Field = "state.offsets", + Terms = new [] { "stable", 
"bellyup" } + } + } + } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "query": { + "match": { + "name.standard": { + "query": "Upton Sons Shield Rice Rowe Roberts" + } + } + }, + "highlight": { + "pre_tags": [ + "" + ], + "post_tags": [ + "" + ], + "fields": { + "name.standard": { + "type": "plain", + "force_source": true, + "fragment_size": 150, + "number_of_fragments": 3, + "no_match_size": 150 + }, + "leadDeveloper.firstName": { + "type": "fvh", + "pre_tags": [ + "" + ], + "post_tags": [ + "" + ], + "highlight_query": { + "match": { + "leadDeveloper.firstName": { + "query": "Kurt Edgardo Naomi Dariana Justice Felton" + } + } + } + }, + "state.offsets": { + "type": "postings", + "pre_tags": [ + "" + ], + "post_tags": [ + "" + ], + "highlight_query": { + "terms": { + "state.offsets": [ + "stable", + "bellyup" + ] + } + } + } + } + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); + +foreach (var highlightsByDocumentId in response.Highlights) +{ + foreach (var highlightHit in highlightsByDocumentId.Value) + { + if (highlightHit.Key == "name.standard") + { + foreach (var highlight in highlightHit.Value.Highlights) + { + highlight.Should().Contain(""); + highlight.Should().Contain(""); + } + } + else if (highlightHit.Key == "leadDeveloper.firstName") + { + foreach (var highlight in highlightHit.Value.Highlights) + { + highlight.Should().Contain(""); + highlight.Should().Contain(""); + } + } + else if (highlightHit.Key == "state.offsets") + { + foreach (var highlight in highlightHit.Value.Highlights) + { + highlight.Should().Contain(""); + highlight.Should().Contain(""); + } + } + else + { + Assert.True(false, $"highlights contains unexpected key {highlightHit.Key}"); + } + } +} +---- + diff --git a/docs/asciidoc/search/request/index-boost-usage.asciidoc b/docs/asciidoc/search/request/index-boost-usage.asciidoc new file mode 100644 index 00000000000..f872e339457 --- /dev/null +++ 
b/docs/asciidoc/search/request/index-boost-usage.asciidoc @@ -0,0 +1,45 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[index-boost-usage]] +== Index Boost Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.IndicesBoost(b => b + .Add("index1", 1.4) + .Add("index2", 1.3) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + IndicesBoost = new Dictionary + { + { "index1", 1.4 }, + { "index2", 1.3 } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "indices_boost": { + "index1": 1.4, + "index2": 1.3 + } +} +---- + diff --git a/docs/asciidoc/search/request/inner-hits-usage.asciidoc b/docs/asciidoc/search/request/inner-hits-usage.asciidoc new file mode 100644 index 00000000000..f6b32124226 --- /dev/null +++ b/docs/asciidoc/search/request/inner-hits-usage.asciidoc @@ -0,0 +1,173 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[inner-hits-usage]] +== Inner Hits Usage + +The {ref_current}/mapping-parent-field.html[parent/child] and {ref_current}/nested.html[nested] features allow the +return of documents that have matches in a different scope. +In the parent/child case, parent documents are returned based on matches in child documents or child documents +are returned based on matches in parent documents. In the nested case, documents are returned based on matches in nested inner objects. + +In both cases, the actual matches in the different scopes that caused a document to be returned are hidden. +In many cases, it’s very useful to know _which_ inner nested objects (in the case of nested) or children/parent +documents (in the case of parent/child) caused certain information to be returned.
+The inner hits feature can be used for this. This feature returns per search hit in the search response additional +nested hits that caused a search hit to match in a different scope. + +Inner hits can be used by defining an `inner_hits` definition on a `nested`, `has_child` or `has_parent` query and filter. + +See the Elasticsearch documentation on {ref_current}/search-request-inner-hits.html[Inner hits] for more detail. + +[[global-inner-hits]] +[float] +== Global Inner Hits + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Index(Index) +.InnerHits(ih => ih + .Type("earls", g => g + .Size(5) + .InnerHits(iih => iih + .Type("barons") + ) + .FielddataFields(p => p.Name) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest(Index, typeof(Duke)) +{ + InnerHits = new NamedInnerHits + { + { "earls", new InnerHitsContainer + { + Type = new TypeInnerHit + { + InnerHit = new GlobalInnerHit + { + Size = 5, + FielddataFields = new Field[]{ "name" }, + InnerHits = new NamedInnerHits + { + { "barons", new TypeInnerHit() } + } + } + } + } } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "inner_hits": { + "earls": { + "type": { + "earl": { + "fielddata_fields": [ + "name" + ], + "inner_hits": { + "barons": { + "type": { + "baron": {} + } + } + }, + "size": 5 + } + } + } + } +} +---- + +[[query-inner-hits]] +[float] +== Query Inner Hits + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Index(Index) +.Query(q => + q.HasChild(hc => hc + .Query(hcq => hcq.MatchAll()) + .InnerHits(ih => ih.Name("princes")) + ) || q.Nested(n => n + .Path(p => p.Foes) + .Query(nq => nq.MatchAll()) + .InnerHits() + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest(Index, typeof(King)) +{ + Query = new HasChildQuery + { + Type = typeof(Prince), + Query = new MatchAllQuery(), + InnerHits = new InnerHits { Name = "princes" } + } || new NestedQuery + { + Path = Field(p => 
p.Foes), + Query = new MatchAllQuery(), + InnerHits = new InnerHits() + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "query": { + "bool": { + "should": [ + { + "has_child": { + "type": "prince", + "query": { + "match_all": {} + }, + "inner_hits": { + "name": "princes" + } + } + }, + { + "nested": { + "query": { + "match_all": {} + }, + "path": "foes", + "inner_hits": {} + } + } + ] + } + } +} +---- + diff --git a/docs/asciidoc/search/request/min-score-usage.asciidoc b/docs/asciidoc/search/request/min-score-usage.asciidoc new file mode 100644 index 00000000000..7be2b54260d --- /dev/null +++ b/docs/asciidoc/search/request/min-score-usage.asciidoc @@ -0,0 +1,50 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[min-score-usage]] +== Min Score Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.MinScore(0.5) +.Query(q => q + .Term(p => p.Name, "elasticsearch") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + MinScore = 0.5, + Query = new TermQuery + { + Field = "name", + Value = "elasticsearch" + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "min_score": 0.5, + "query": { + "term": { + "name": { + "value": "elasticsearch" + } + } + } +} +---- + diff --git a/docs/asciidoc/search/request/post-filter-usage.asciidoc b/docs/asciidoc/search/request/post-filter-usage.asciidoc new file mode 100644 index 00000000000..66a0dee434b --- /dev/null +++ b/docs/asciidoc/search/request/post-filter-usage.asciidoc @@ -0,0 +1,37 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[post-filter-usage]] +== Post Filter Usage + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest() +{ + 
PostFilter = new QueryContainer(new MatchAllQuery()) +} +---- + +[source,javascript] +.Example json output +---- +{ + "post_filter": { + "match_all": {} + } +} +---- + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.PostFilter(f => f.MatchAll()) +---- + diff --git a/docs/asciidoc/search/request/profile-usage.asciidoc b/docs/asciidoc/search/request/profile-usage.asciidoc new file mode 100644 index 00000000000..bc9c3e2e40b --- /dev/null +++ b/docs/asciidoc/search/request/profile-usage.asciidoc @@ -0,0 +1,61 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[profile-usage]] +== Profile Usage + +WARNING: This functionality is experimental and may be changed or removed completely in a future release. + +The Profile API provides detailed timing information about the execution of individual components in a query. +It gives the user insight into how queries are executed at a low level so that the user can understand +why certain queries are slow, and take steps to improve their slow queries. + +The output from the Profile API is very verbose, especially for complicated queries executed across many shards. +Pretty-printing the response is recommended to help understand the output + +See the Elasticsearch documentation on {ref_current}/search-profile.html[Profile API] for more detail. 
+ +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Profile() +.Query(q => q + .Term(p => p.Name, "elasticsearch") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Profile = true, + Query = new TermQuery + { + Field = "name", + Value = "elasticsearch" + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "profile": true, + "query": { + "term": { + "name": { + "value": "elasticsearch" + } + } + } +} +---- + diff --git a/docs/asciidoc/search/request/query-usage.asciidoc b/docs/asciidoc/search/request/query-usage.asciidoc new file mode 100644 index 00000000000..c570f766ef7 --- /dev/null +++ b/docs/asciidoc/search/request/query-usage.asciidoc @@ -0,0 +1,49 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[query-usage]] +== Query Usage + +The query element within the search request body allows to define a query using the <>. 
+ +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Query(q => q + .Term(p => p.Name, "elasticsearch") +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Query = new TermQuery + { + Field = "name", + Value = "elasticsearch" + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "query": { + "term": { + "name": { + "value": "elasticsearch" + } + } + } +} +---- + diff --git a/docs/asciidoc/search/request/script-fields-usage.asciidoc b/docs/asciidoc/search/request/script-fields-usage.asciidoc new file mode 100644 index 00000000000..9f38f7add9a --- /dev/null +++ b/docs/asciidoc/search/request/script-fields-usage.asciidoc @@ -0,0 +1,72 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[script-fields-usage]] +== Script Fields Usage + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.ScriptFields(sf=>sf + .ScriptField("test1", sc=>sc + .Inline("doc['my_field_name'].value * 2") + ) + .ScriptField("test2", sc=>sc + .Inline("doc['my_field_name'].value * factor") + .Params(p=>p + .Add("factor", 2.0) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + ScriptFields = new ScriptFields + { + { "test1", new ScriptField + { + Script = new InlineScript("doc['my_field_name'].value * 2") + } }, + { "test2", new InlineScript("doc['my_field_name'].value * factor") + { + Params = new FluentDictionary + { + { "factor", 2.0 } + } + } } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "script_fields": { + "test1": { + "script": { + "inline": "doc['my_field_name'].value * 2" + } + }, + "test2": { + "script": { + "inline": "doc['my_field_name'].value * factor", + "params": { + "factor": 2.0 + } + } + } + } +} +---- + diff --git a/docs/asciidoc/search/request/sort-usage.asciidoc 
b/docs/asciidoc/search/request/sort-usage.asciidoc new file mode 100644 index 00000000000..df7d36dd3c9 --- /dev/null +++ b/docs/asciidoc/search/request/sort-usage.asciidoc @@ -0,0 +1,168 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[sort-usage]] +== Sort Usage + +Allows to add one or more sort on specific fields. Each sort can be reversed as well. +The sort is defined on a per field level, with special field name for `_score` to sort by score. + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Sort(ss => ss + .Ascending(p => p.StartedOn) + .Descending(p => p.Name) + .Descending(SortSpecialField.Score) + .Ascending(SortSpecialField.DocumentIndexOrder) + .Field(f => f + .Field(p => p.LastActivity) + .Order(SortOrder.Descending) + .MissingLast() + .UnmappedType(FieldType.Date) + .Mode(SortMode.Average) + .NestedPath(p => p.Tags) + .NestedFilter(q => q.MatchAll()) + ) + .GeoDistance(g => g + .Field(p => p.Location) + .DistanceType(GeoDistanceType.Arc) + .Order(SortOrder.Ascending) + .Unit(DistanceUnit.Centimeters) + .Mode(SortMode.Min) + .PinTo(new GeoLocation(70, -70), new GeoLocation(-12, 12)) + ) + .Script(sc => sc + .Type("number") + .Ascending() + .Script(script => script + .Inline("doc['numberOfCommits'].value * factor") + .Params(p => p.Add("factor", 1.1)) + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Sort = new List + { + new SortField { Field = "startedOn", Order = SortOrder.Ascending }, + new SortField { Field = "name", Order = SortOrder.Descending }, + new SortField { Field = "_score", Order = SortOrder.Descending }, + new SortField { Field = "_doc", Order = SortOrder.Ascending }, + new SortField + { + Field = Field(p=>p.LastActivity), + Order = SortOrder.Descending, + Missing = "_last", + UnmappedType = FieldType.Date, + Mode = SortMode.Average, + 
NestedPath = Field(p=>p.Tags), + NestedFilter = new MatchAllQuery(), + }, + new GeoDistanceSort + { + Field = "location", + Order = SortOrder.Ascending, + DistanceType = GeoDistanceType.Arc, + GeoUnit = DistanceUnit.Centimeters, + Mode = SortMode.Min, + Points = new [] {new GeoLocation(70, -70), new GeoLocation(-12, 12) } + }, + new ScriptSort + { + Type = "number", + Order = SortOrder.Ascending, + Script = new InlineScript("doc['numberOfCommits'].value * factor") + { + Params = new Dictionary + { + { "factor", 1.1 } + } + } + } + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "sort": [ + { + "startedOn": { + "order": "asc" + } + }, + { + "name": { + "order": "desc" + } + }, + { + "_score": { + "order": "desc" + } + }, + { + "_doc": { + "order": "asc" + } + }, + { + "lastActivity": { + "missing": "_last", + "order": "desc", + "mode": "avg", + "nested_filter": { + "match_all": {} + }, + "nested_path": "tags", + "unmapped_type": "date" + } + }, + { + "_geo_distance": { + "location": [ + { + "lat": 70.0, + "lon": -70.0 + }, + { + "lat": -12.0, + "lon": 12.0 + } + ], + "order": "asc", + "mode": "min", + "distance_type": "arc", + "unit": "cm" + } + }, + { + "_script": { + "order": "asc", + "type": "number", + "script": { + "params": { + "factor": 1.1 + }, + "inline": "doc['numberOfCommits'].value * factor" + } + } + } + ] +} +---- + diff --git a/docs/asciidoc/search/request/source-filtering-usage.asciidoc b/docs/asciidoc/search/request/source-filtering-usage.asciidoc new file mode 100644 index 00000000000..8dd5e3ead22 --- /dev/null +++ b/docs/asciidoc/search/request/source-filtering-usage.asciidoc @@ -0,0 +1,70 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[source-filtering-usage]] +== Source Filtering Usage + +Allows to control how the `_source` field is returned with every hit. 
+By default operations return the contents of the `_source` field unless +you have used the fields parameter or if the `_source` field is disabled. + +See the Elasticsearch documentation on {ref_current}/search-request-source-filtering.html[Source Filtering] for more detail. + +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Source(so => so + .Include(f => f + .Fields( + p => p.Name, + p => p.StartedOn + ) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Source = new SourceFilter + { + Include = Fields(p => p.Name, prop => prop.StartedOn) + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "_source": { + "include": [ + "name", + "startedOn" + ] + } +} +---- + +=== Handling Responses + +[source,csharp] +---- +response.IsValid.Should().BeTrue(); + +foreach (var document in response.Documents) +{ + document.Name.Should().NotBeNull(); + document.StartedOn.Should().NotBe(default(DateTime)); + document.Description.Should().BeNull(); +} +---- + diff --git a/docs/asciidoc/search/request/suggest-usage.asciidoc b/docs/asciidoc/search/request/suggest-usage.asciidoc new file mode 100644 index 00000000000..15ac1e2a182 --- /dev/null +++ b/docs/asciidoc/search/request/suggest-usage.asciidoc @@ -0,0 +1,240 @@ +:ref_current: https://www.elastic.co/guide/en/elasticsearch/reference/current + +:github: https://github.com/elastic/elasticsearch-net + +:nuget: https://www.nuget.org/packages + +[[suggest-usage]] +== Suggest Usage + +The suggest feature suggests similar looking terms based on a provided text by using a suggester. + +See the Elasticsearch documentation on {ref_current}/search-suggesters.html[Suggesters] for more detail. 
+ +=== Fluent DSL Example + +[source,csharp] +---- +s => s +.Suggest(ss => ss + .Term("my-term-suggest", t => t + .MaxEdits(1) + .MaxInspections(2) + .MaxTermFrequency(3) + .MinDocFrequency(4) + .MinWordLength(5) + .PrefixLength(6) + .SuggestMode(SuggestMode.Always) + .Analyzer("standard") + .Field(p => p.Name) + .ShardSize(7) + .Size(8) + .Text("hello world") + ) + .Completion("my-completion-suggest", c => c + .Contexts(ctxs => ctxs + .Context("color", + ctx => ctx.Context(Project.Projects.First().Suggest.Contexts.Values.SelectMany(v => v).First()) + ) + ) + .Fuzzy(f => f + .Fuzziness(Fuzziness.Auto) + .MinLength(1) + .PrefixLength(2) + .Transpositions() + .UnicodeAware(false) + ) + .Analyzer("simple") + .Field(p => p.Suggest) + .Size(8) + .Prefix(Project.Instance.Name) + .Payload(fs => fs.Field(p => p.NumberOfCommits)) + ) + .Phrase("my-phrase-suggest", ph => ph + .Collate(c => c + .Query(q => q + .Inline("{ \"match\": { \"{{field_name}}\": \"{{suggestion}}\" }}") + .Params(p => p.Add("field_name", "title")) + ) + .Prune() + ) + .Confidence(10.1) + .DirectGenerator(d => d + .Field(p => p.Description) + ) + .GramSize(1) + .Field(p => p.Name) + .Text("hello world") + .RealWordErrorLikelihood(0.5) + ) +) +---- + +=== Object Initializer Syntax Example + +[source,csharp] +---- +new SearchRequest +{ + Suggest = new SuggestContainer + { + { "my-term-suggest", new SuggestBucket + { + Text = "hello world", + Term = new TermSuggester + { + MaxEdits = 1, + MaxInspections = 2, + MaxTermFrequency = 3, + MinDocFrequency = 4, + MinWordLength = 5, + PrefixLength = 6, + SuggestMode = SuggestMode.Always, + Analyzer = "standard", + Field = Field(p=>p.Name), + ShardSize = 7, + Size = 8 + } + } }, + { "my-completion-suggest", new SuggestBucket + { + Prefix = Project.Instance.Name, + Completion = new CompletionSuggester + { + Contexts = new Dictionary> + { + { "color", new List { new SuggestContextQuery { Context = Project.Projects.First().Suggest.Contexts.Values.SelectMany(v => 
v).First() } } } + }, + Fuzzy = new FuzzySuggester + { + Fuzziness = Fuzziness.Auto, + MinLength = 1, + PrefixLength = 2, + Transpositions = true, + UnicodeAware = false + }, + Analyzer = "simple", + Field = Field(p=>p.Suggest), + Size = 8, + Payload = Fields("numberOfCommits") + } + } }, + { "my-phrase-suggest", new SuggestBucket + { + Text = "hello world", + Phrase = new PhraseSuggester + { + Collate = new PhraseSuggestCollate + { + Query = new InlineScript("{ \"match\": { \"{{field_name}}\": \"{{suggestion}}\" }}") + { + Params = new Dictionary + { + { "field_name", "title" } + } + }, + Prune = true + }, + Confidence = 10.1, + DirectGenerator = new List + { + new DirectGenerator { Field = "description" } + }, + GramSize = 1, + Field = "name", + RealWordErrorLikelihood = 0.5 + } + } }, + } +} +---- + +[source,javascript] +.Example json output +---- +{ + "suggest": { + "my-completion-suggest": { + "completion": { + "analyzer": "simple", + "contexts": { + "color": [ + { + "context": "red" + } + ] + }, + "field": "suggest", + "fuzzy": { + "fuzziness": "AUTO", + "min_length": 1, + "prefix_length": 2, + "transpositions": true, + "unicode_aware": false + }, + "size": 8, + "payload": [ + "numberOfCommits" + ] + }, + "prefix": "Durgan LLC" + }, + "my-phrase-suggest": { + "phrase": { + "collate": { + "query": { + "inline": "{ \"match\": { \"{{field_name}}\": \"{{suggestion}}\" }}", + "params": { + "field_name": "title" + } + }, + "prune": true + }, + "confidence": 10.1, + "direct_generator": [ + { + "field": "description" + } + ], + "field": "name", + "gram_size": 1, + "real_word_error_likelihood": 0.5 + }, + "text": "hello world" + }, + "my-term-suggest": { + "term": { + "analyzer": "standard", + "field": "name", + "max_edits": 1, + "max_inspections": 2, + "max_term_freq": 3.0, + "min_doc_freq": 4.0, + "min_word_length": 5, + "prefix_length": 6, + "shard_size": 7, + "size": 8, + "suggest_mode": "always" + }, + "text": "hello world" + } + } +} +---- + +=== Handling 
Responses + +[source,csharp] +---- +var myCompletionSuggest = response.Suggest["my-completion-suggest"]; +myCompletionSuggest.Should().NotBeNull(); +var suggest = myCompletionSuggest.First(); +suggest.Text.Should().Be(Project.Instance.Name); +suggest.Length.Should().BeGreaterThan(0); +var option = suggest.Options.First(); +option.Text.Should().NotBeNullOrEmpty(); +option.Score.Should().BeGreaterThan(0); +option.Payload.Should().NotBeNull(); +option.Payload.Value("numberOfCommits").Should().BeGreaterThan(0); +---- + diff --git a/docs/asciidoc/timeoutplot.png b/docs/asciidoc/timeoutplot.png new file mode 100644 index 00000000000..ceb819bff0b Binary files /dev/null and b/docs/asciidoc/timeoutplot.png differ diff --git a/paket.dependencies b/paket.dependencies index 750cb99bbb3..9deb90f83dc 100644 --- a/paket.dependencies +++ b/paket.dependencies @@ -26,6 +26,10 @@ nuget Rx-Linq nuget Rx-Main nuget Rx-PlatformServices +source https://api.nuget.org/v3/index.json + +nuget AsciiDocNet + group build source https://www.nuget.org/api/v2 diff --git a/paket.lock b/paket.lock index 5133eaa023e..194f3d2f343 100644 --- a/paket.lock +++ b/paket.lock @@ -1,4 +1,7 @@ NUGET + remote: http://api.nuget.org/v3/index.json + specs: + AsciiDocNet (1.0.0-alpha2) remote: https://www.nuget.org/api/v2 specs: Bogus (3.0.5-beta-2) @@ -16,13 +19,13 @@ NUGET System.Collections (>= 4.0.10) - framework: dnxcore50 System.Diagnostics.Debug (>= 4.0.10) - framework: dnxcore50 System.Globalization (>= 4.0.10) - framework: dnxcore50 - System.Linq (>= 4.0.0) - framework: dnxcore50 + System.Linq (>= 4.0) - framework: dnxcore50 System.Linq.Expressions (>= 4.0.10) - framework: dnxcore50 System.ObjectModel (>= 4.0.10) - framework: dnxcore50 System.Reflection (>= 4.0.10) - framework: dnxcore50 - System.Reflection.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Reflection.TypeExtensions (>= 4.0.0) - framework: dnxcore50 + 
System.Reflection.Extensions (>= 4.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 + System.Reflection.TypeExtensions (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime.Extensions (>= 4.0.10) - framework: dnxcore50 System.Text.RegularExpressions (>= 4.0.10) - framework: dnxcore50 @@ -31,12 +34,12 @@ NUGET System.Xml.XDocument (>= 4.0.10) - framework: dnxcore50 Humanizer (1.37.7) JetBrains.Profiler.Kernel.Windows.Api (104.0.20151218.125646) - Wave (4.0.0) - Microsoft.CodeAnalysis.Analyzers (1.1.0) + Wave (4.0) + Microsoft.CodeAnalysis.Analyzers (1.1) Microsoft.CodeAnalysis.Common (1.1.1) - Microsoft.CodeAnalysis.Analyzers (>= 1.1.0) + Microsoft.CodeAnalysis.Analyzers (>= 1.1) System.Collections.Immutable (>= 1.1.37) - System.Reflection.Metadata (>= 1.1.0) + System.Reflection.Metadata (>= 1.1) Microsoft.CodeAnalysis.CSharp (1.1.1) Microsoft.CodeAnalysis.Common (1.1.1) NDesk.Options (0.2.1) @@ -56,231 +59,231 @@ NUGET Rx-PlatformServices (2.2.5) Rx-Core (>= 2.2.5) Rx-Interfaces (>= 2.2.5) - ShellProgressBar (1.2.0) + ShellProgressBar (1.2) System.Collections (4.0.10) - framework: dnxcore50 - System.Diagnostics.Debug (>= 4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Diagnostics.Debug (>= 4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 - System.Runtime.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Threading (>= 4.0.0) - framework: dnxcore50 + System.Runtime.Extensions (>= 4.0) - framework: dnxcore50 + System.Threading (>= 4.0) - framework: dnxcore50 System.Collections.Immutable (1.1.37) - System.Collections (>= 4.0.0) - framework: dnxcore50 - System.Diagnostics.Debug (>= 4.0.0) - framework: dnxcore50 - System.Globalization 
(>= 4.0.0) - framework: dnxcore50 - System.Linq (>= 4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 - System.Runtime.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Threading (>= 4.0.0) - framework: dnxcore50 + System.Collections (>= 4.0) - framework: dnxcore50 + System.Diagnostics.Debug (>= 4.0) - framework: dnxcore50 + System.Globalization (>= 4.0) - framework: dnxcore50 + System.Linq (>= 4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 + System.Runtime.Extensions (>= 4.0) - framework: dnxcore50 + System.Threading (>= 4.0) - framework: dnxcore50 System.ComponentModel (4.0.1-beta-23516) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 - System.Diagnostics.Contracts (4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Diagnostics.Contracts (4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Diagnostics.Debug (4.0.10) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Dynamic.Runtime (4.0.11-beta-23516) - framework: dnxcore50 System.Collections (>= 4.0.10) - framework: dnxcore50 System.Diagnostics.Debug (>= 4.0.10) - framework: dnxcore50 System.Globalization (>= 4.0.10) - framework: dnxcore50 - System.Linq (>= 4.0.0) - framework: dnxcore50 - System.Linq.Expressions (>= 4.0.0) - framework: dnxcore50 + System.Linq (>= 4.0) - framework: dnxcore50 + System.Linq.Expressions (>= 4.0) - framework: dnxcore50 System.Linq.Expressions (>= 4.0.10) - framework: dnxcore50 - System.ObjectModel (>= 4.0.0) - framework: dnxcore50 + System.ObjectModel (>= 4.0) - framework: dnxcore50 System.ObjectModel (>= 4.0.10) - 
framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 System.Reflection (>= 4.0.10) - framework: dnxcore50 - System.Reflection.Emit (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Emit.ILGeneration (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Reflection.TypeExtensions (>= 4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Reflection.Emit (>= 4.0) - framework: dnxcore50 + System.Reflection.Emit.ILGeneration (>= 4.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 + System.Reflection.TypeExtensions (>= 4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime.Extensions (>= 4.0.10) - framework: dnxcore50 System.Threading (>= 4.0.10) - framework: dnxcore50 System.Globalization (4.0.10) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.IO (4.0.11-beta-23516) - framework: dnxcore50 - System.Globalization (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Globalization (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 - System.Text.Encoding (>= 4.0.0) - framework: dnxcore50 + System.Text.Encoding (>= 4.0) - framework: dnxcore50 System.Text.Encoding (>= 4.0.10) - framework: dnxcore50 - System.Text.Encoding.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Threading (>= 4.0.0) - framework: dnxcore50 - System.Threading.Tasks (>= 4.0.0) - framework: dnxcore50 - System.IO.FileSystem (4.0.0) - framework: dnxcore50 + 
System.Text.Encoding.Extensions (>= 4.0) - framework: dnxcore50 + System.Threading (>= 4.0) - framework: dnxcore50 + System.Threading.Tasks (>= 4.0) - framework: dnxcore50 + System.IO.FileSystem (4.0) - framework: dnxcore50 System.Collections (>= 4.0.10) - framework: dnxcore50 System.Diagnostics.Debug (>= 4.0.10) - framework: dnxcore50 - System.IO (>= 4.0.0) - framework: dnxcore50 + System.IO (>= 4.0) - framework: dnxcore50 System.IO (>= 4.0.10) - framework: dnxcore50 - System.IO.FileSystem.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.IO.FileSystem.Primitives (>= 4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime.Extensions (>= 4.0.10) - framework: dnxcore50 - System.Runtime.Handles (>= 4.0.0) - framework: dnxcore50 + System.Runtime.Handles (>= 4.0) - framework: dnxcore50 System.Runtime.InteropServices (>= 4.0.20) - framework: dnxcore50 - System.Runtime.WindowsRuntime (>= 4.0.0) - framework: dnxcore50 - System.Text.Encoding (>= 4.0.0) - framework: dnxcore50 + System.Runtime.WindowsRuntime (>= 4.0) - framework: dnxcore50 + System.Text.Encoding (>= 4.0) - framework: dnxcore50 System.Text.Encoding (>= 4.0.10) - framework: dnxcore50 System.Text.Encoding.Extensions (>= 4.0.10) - framework: dnxcore50 System.Threading (>= 4.0.10) - framework: dnxcore50 - System.Threading.Overlapped (>= 4.0.0) - framework: dnxcore50 - System.Threading.Tasks (>= 4.0.0) - framework: dnxcore50 + System.Threading.Overlapped (>= 4.0) - framework: dnxcore50 + System.Threading.Tasks (>= 4.0) - framework: dnxcore50 System.Threading.Tasks (>= 4.0.10) - framework: dnxcore50 - System.IO.FileSystem.Primitives (4.0.0) - framework: dnxcore50 + System.IO.FileSystem.Primitives (4.0) - framework: dnxcore50 System.Runtime (>= 
4.0.20) - framework: dnxcore50 - System.Linq (4.0.0) - framework: dnxcore50 + System.Linq (4.0) - framework: dnxcore50 System.Collections (>= 4.0.10) - framework: dnxcore50 System.Diagnostics.Debug (>= 4.0.10) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime.Extensions (>= 4.0.10) - framework: dnxcore50 System.Linq.Expressions (4.0.10) - framework: dnxcore50 - System.Collections (>= 4.0.0) - framework: dnxcore50 - System.Diagnostics.Debug (>= 4.0.0) - framework: dnxcore50 - System.Globalization (>= 4.0.0) - framework: dnxcore50 - System.IO (>= 4.0.0) - framework: dnxcore50 - System.Linq (>= 4.0.0) - framework: dnxcore50 - System.ObjectModel (>= 4.0.0) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Emit (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Reflection.TypeExtensions (>= 4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Collections (>= 4.0) - framework: dnxcore50 + System.Diagnostics.Debug (>= 4.0) - framework: dnxcore50 + System.Globalization (>= 4.0) - framework: dnxcore50 + System.IO (>= 4.0) - framework: dnxcore50 + System.Linq (>= 4.0) - framework: dnxcore50 + System.ObjectModel (>= 4.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 + System.Reflection.Emit (>= 4.0) - framework: dnxcore50 + System.Reflection.Extensions (>= 4.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 + System.Reflection.TypeExtensions (>= 4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - 
framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 - System.Runtime.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Threading (>= 4.0.0) - framework: dnxcore50 + System.Runtime.Extensions (>= 4.0) - framework: dnxcore50 + System.Threading (>= 4.0) - framework: dnxcore50 System.ObjectModel (4.0.10) - framework: dnxcore50 System.Collections (>= 4.0.10) - framework: dnxcore50 System.Diagnostics.Debug (>= 4.0.10) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Threading (>= 4.0.10) - framework: dnxcore50 - System.Private.Uri (4.0.0) - framework: dnxcore50 + System.Private.Uri (4.0) - framework: dnxcore50 System.Reflection (4.0.10) - framework: dnxcore50 - System.IO (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 + System.IO (>= 4.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 - System.Reflection.Emit (4.0.0) - framework: dnxcore50 - System.IO (>= 4.0.0) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Emit.ILGeneration (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Emit.ILGeneration (4.0.0) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Reflection.Emit (4.0) - framework: dnxcore50 + System.IO (>= 4.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 + System.Reflection.Emit.ILGeneration (>= 4.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 + 
System.Runtime (>= 4.0) - framework: dnxcore50 + System.Reflection.Emit.ILGeneration (4.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Reflection.Extensions (4.0.1-beta-23516) - framework: dnxcore50 System.Diagnostics.Debug (>= 4.0.10) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 System.Reflection (>= 4.0.10) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 System.Reflection.TypeExtensions (>= 4.1.0-beta-23516) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime.Extensions (>= 4.0.10) - framework: dnxcore50 - System.Reflection.Metadata (1.1.0) - System.Collections (>= 4.0.0) - framework: dnxcore50 + System.Reflection.Metadata (1.1) + System.Collections (>= 4.0) - framework: dnxcore50 System.Collections.Immutable (>= 1.1.37) - framework: dnxcore50, portable-net45+win80 - System.Diagnostics.Debug (>= 4.0.0) - framework: dnxcore50 - System.IO (>= 4.0.0) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 - System.Runtime.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Runtime.InteropServices (>= 4.0.0) - framework: dnxcore50 - System.Text.Encoding (>= 4.0.0) - framework: dnxcore50 - 
System.Text.Encoding.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Threading (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Primitives (4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 - System.Threading (>= 4.0.0) - framework: dnxcore50 + System.Diagnostics.Debug (>= 4.0) - framework: dnxcore50 + System.IO (>= 4.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 + System.Reflection.Extensions (>= 4.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 + System.Runtime.Extensions (>= 4.0) - framework: dnxcore50 + System.Runtime.InteropServices (>= 4.0) - framework: dnxcore50 + System.Text.Encoding (>= 4.0) - framework: dnxcore50 + System.Text.Encoding.Extensions (>= 4.0) - framework: dnxcore50 + System.Threading (>= 4.0) - framework: dnxcore50 + System.Reflection.Primitives (4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 + System.Threading (>= 4.0) - framework: dnxcore50 System.Reflection.TypeExtensions (4.1.0-beta-23516) - framework: dnxcore50 - System.Diagnostics.Contracts (>= 4.0.0) - framework: dnxcore50 + System.Diagnostics.Contracts (>= 4.0) - framework: dnxcore50 System.Diagnostics.Debug (>= 4.0.10) - framework: dnxcore50 - System.Linq (>= 4.0.0) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 + System.Linq (>= 4.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 System.Reflection (>= 4.0.10) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - 
framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime.Extensions (>= 4.0.10) - framework: dnxcore50 - System.Resources.ResourceManager (4.0.0) - framework: dnxcore50 - System.Globalization (>= 4.0.0) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 + System.Resources.ResourceManager (4.0) - framework: dnxcore50 + System.Globalization (>= 4.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 System.Reflection (>= 4.0.10) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime (4.0.20) - framework: dnxcore50 - System.Private.Uri (>= 4.0.0) - framework: dnxcore50 + System.Private.Uri (>= 4.0) - framework: dnxcore50 System.Runtime.Extensions (4.0.10) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 - System.Runtime.Handles (4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Runtime.Handles (4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime.InteropServices (4.0.20) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 - System.Runtime.Handles (>= 4.0.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 + System.Reflection.Primitives (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 + System.Runtime.Handles (>= 4.0) - framework: dnxcore50 System.Runtime.WindowsRuntime (4.0.10) - framework: dnxcore50 System.Diagnostics.Debug (>= 4.0.10) - framework: dnxcore50 - System.Globalization (>= 4.0.0) - framework: dnxcore50 - System.IO (>= 4.0.0) - framework: dnxcore50 + System.Globalization (>= 4.0) - framework: dnxcore50 + System.IO (>= 4.0) - framework: dnxcore50 
System.IO (>= 4.0.10) - framework: dnxcore50 - System.ObjectModel (>= 4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.ObjectModel (>= 4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 - System.Runtime.Extensions (>= 4.0.0) - framework: dnxcore50 + System.Runtime.Extensions (>= 4.0) - framework: dnxcore50 System.Runtime.InteropServices (>= 4.0.20) - framework: dnxcore50 System.Threading (>= 4.0.10) - framework: dnxcore50 - System.Threading.Tasks (>= 4.0.0) - framework: dnxcore50 + System.Threading.Tasks (>= 4.0) - framework: dnxcore50 System.Threading.Tasks (>= 4.0.10) - framework: dnxcore50 System.Text.Encoding (4.0.11-beta-23516) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Text.Encoding.Extensions (4.0.10) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Text.Encoding (>= 4.0.10) - framework: dnxcore50 System.Text.RegularExpressions (4.0.11-beta-23516) - framework: dnxcore50 System.Collections (>= 4.0.10) - framework: dnxcore50 System.Globalization (>= 4.0.10) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime.Extensions (>= 4.0.10) - framework: dnxcore50 System.Threading (>= 4.0.10) - framework: dnxcore50 System.Threading (4.0.10) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 - System.Threading.Tasks (>= 4.0.0) - framework: dnxcore50 - 
System.Threading.Overlapped (4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 + System.Threading.Tasks (>= 4.0) - framework: dnxcore50 + System.Threading.Overlapped (4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 - System.Runtime.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Runtime.Handles (>= 4.0.0) - framework: dnxcore50 + System.Runtime.Extensions (>= 4.0) - framework: dnxcore50 + System.Runtime.Handles (>= 4.0) - framework: dnxcore50 System.Runtime.InteropServices (>= 4.0.20) - framework: dnxcore50 System.Threading (>= 4.0.10) - framework: dnxcore50 System.Threading.Tasks (4.0.10) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 System.Xml.ReaderWriter (4.0.10) - framework: dnxcore50 System.Collections (>= 4.0.10) - framework: dnxcore50 System.Diagnostics.Debug (>= 4.0.10) - framework: dnxcore50 System.Globalization (>= 4.0.10) - framework: dnxcore50 System.IO (>= 4.0.10) - framework: dnxcore50 - System.IO.FileSystem (>= 4.0.0) - framework: dnxcore50 - System.IO.FileSystem.Primitives (>= 4.0.0) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 + System.IO.FileSystem (>= 4.0) - framework: dnxcore50 + System.IO.FileSystem.Primitives (>= 4.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime.Extensions (>= 4.0.10) - framework: dnxcore50 System.Runtime.InteropServices (>= 4.0.20) - framework: dnxcore50 @@ -294,74 +297,74 @@ NUGET System.Globalization (>= 4.0.10) - framework: dnxcore50 System.IO (>= 4.0.10) - framework: dnxcore50 System.Reflection 
(>= 4.0.10) - framework: dnxcore50 - System.Resources.ResourceManager (>= 4.0.0) - framework: dnxcore50 + System.Resources.ResourceManager (>= 4.0) - framework: dnxcore50 System.Runtime (>= 4.0.20) - framework: dnxcore50 System.Runtime.Extensions (>= 4.0.10) - framework: dnxcore50 System.Text.Encoding (>= 4.0.10) - framework: dnxcore50 System.Threading (>= 4.0.10) - framework: dnxcore50 System.Xml.ReaderWriter (>= 4.0.10) - framework: dnxcore50 - Wave (4.0.0) - xunit (2.1.0) - xunit.assert (2.1.0) - xunit.core (2.1.0) - xunit.abstractions (2.0.0) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0 - xunit.assert (2.1.0) - System.Collections (>= 4.0.0) - framework: dnxcore50 - System.Diagnostics.Debug (>= 4.0.0) - framework: dnxcore50 - System.Globalization (>= 4.0.0) - framework: dnxcore50 - System.Linq (>= 4.0.0) - framework: dnxcore50 - System.ObjectModel (>= 4.0.0) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 - System.Runtime.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Text.RegularExpressions (>= 4.0.0) - framework: dnxcore50 - System.Threading.Tasks (>= 4.0.0) - framework: dnxcore50 - xunit.core (2.1.0) - System.Collections (>= 4.0.0) - framework: dnxcore50 - System.Diagnostics.Debug (>= 4.0.0) - framework: dnxcore50 - System.Globalization (>= 4.0.0) - framework: dnxcore50 - System.Linq (>= 4.0.0) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 - System.Runtime.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Threading.Tasks (>= 4.0.0) - framework: dnxcore50 - xunit.abstractions (>= 2.0.0) - framework: dnxcore50 - xunit.extensibility.core (2.1.0) - framework: >= net45, dnx451, 
dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0 - xunit.extensibility.execution (2.1.0) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0 - xunit.extensibility.core (2.1.0) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0 - xunit.abstractions (2.0.0) - xunit.extensibility.execution (2.1.0) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0 - System.Collections (>= 4.0.0) - framework: dnxcore50 - System.Diagnostics.Debug (>= 4.0.0) - framework: dnxcore50 - System.Globalization (>= 4.0.0) - framework: dnxcore50 - System.IO (>= 4.0.0) - framework: dnxcore50 - System.Linq (>= 4.0.0) - framework: dnxcore50 - System.Linq.Expressions (>= 4.0.0) - framework: dnxcore50 - System.Reflection (>= 4.0.0) - framework: dnxcore50 - System.Reflection.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Runtime (>= 4.0.0) - framework: dnxcore50 - System.Runtime.Extensions (>= 4.0.0) - framework: dnxcore50 - System.Text.Encoding (>= 4.0.0) - framework: dnxcore50 - System.Threading (>= 4.0.0) - framework: dnxcore50 - System.Threading.Tasks (>= 4.0.0) - framework: dnxcore50 - xunit.abstractions (>= 2.0.0) - framework: dnxcore50 - xunit.extensibility.core (2.1.0) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, xamarinios, winv4.5, wpv8.0 + Wave (4.0) + xunit (2.1) + xunit.assert (2.1) + xunit.core (2.1) + xunit.abstractions (2.0) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0, wpav8.1 + xunit.assert (2.1) + System.Collections (>= 4.0) - framework: dnxcore50 + System.Diagnostics.Debug (>= 4.0) - framework: dnxcore50 + System.Globalization (>= 4.0) - framework: dnxcore50 + System.Linq (>= 4.0) - framework: dnxcore50 + 
System.ObjectModel (>= 4.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 + System.Reflection.Extensions (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 + System.Runtime.Extensions (>= 4.0) - framework: dnxcore50 + System.Text.RegularExpressions (>= 4.0) - framework: dnxcore50 + System.Threading.Tasks (>= 4.0) - framework: dnxcore50 + xunit.core (2.1) + System.Collections (>= 4.0) - framework: dnxcore50 + System.Diagnostics.Debug (>= 4.0) - framework: dnxcore50 + System.Globalization (>= 4.0) - framework: dnxcore50 + System.Linq (>= 4.0) - framework: dnxcore50 + System.Reflection (>= 4.0) - framework: dnxcore50 + System.Reflection.Extensions (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 + System.Runtime.Extensions (>= 4.0) - framework: dnxcore50 + System.Threading.Tasks (>= 4.0) - framework: dnxcore50 + xunit.abstractions (>= 2.0) - framework: dnxcore50 + xunit.extensibility.core (2.1) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0, wpav8.1 + xunit.extensibility.execution (2.1) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0, wpav8.1 + xunit.extensibility.core (2.1) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0, wpav8.1 + xunit.abstractions (2.0) + xunit.extensibility.execution (2.1) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, portable-net45+win80+wp80+wpa81, xamarinios, winv4.5, wpv8.0, wpav8.1 + System.Collections (>= 4.0) - framework: dnxcore50 + System.Diagnostics.Debug (>= 4.0) - framework: dnxcore50 + System.Globalization (>= 4.0) - framework: dnxcore50 + System.IO (>= 4.0) - framework: dnxcore50 + System.Linq (>= 4.0) - framework: dnxcore50 + System.Linq.Expressions (>= 4.0) - framework: dnxcore50 + 
System.Reflection (>= 4.0) - framework: dnxcore50 + System.Reflection.Extensions (>= 4.0) - framework: dnxcore50 + System.Runtime (>= 4.0) - framework: dnxcore50 + System.Runtime.Extensions (>= 4.0) - framework: dnxcore50 + System.Text.Encoding (>= 4.0) - framework: dnxcore50 + System.Threading (>= 4.0) - framework: dnxcore50 + System.Threading.Tasks (>= 4.0) - framework: dnxcore50 + xunit.abstractions (>= 2.0) - framework: dnxcore50 + xunit.extensibility.core (2.1) - framework: >= net45, dnx451, dnxcore50, monoandroid, monotouch, xamarinios, winv4.5, wpv8.0, wpav8.1 GROUP build NUGET remote: https://www.nuget.org/api/v2 specs: - FAKE (4.19.0) + FAKE (4.19) FSharp.Data (2.2.5) - Zlib.Portable (>= 1.10.0) - framework: portable-net40+sl50+wp80+win80 + Zlib.Portable (>= 1.10) - framework: portable-net40+sl50+wp80+win80 gitlink (2.3.0-unstable0022) - Node.js (5.3.0) - NoGit (0.1.0) - Node.js (>= 0.12.0) + Node.js (5.3) + NoGit (0.1) + Node.js (>= 0.12) Npm (3.5.2) Node.js (>= 0.12.7) - NoGit (>= 0.1.0) - xunit.runner.console (2.1.0) - Zlib.Portable (1.11.0) - framework: portable-net40+sl50+wp80+win80 + NoGit (>= 0.1) + xunit.runner.console (2.1) + Zlib.Portable (1.11) - framework: portable-net40+sl50+wp80+win80 diff --git a/src/CodeGeneration/Nest.Litterateur/AsciiDoc/GeneratedAsciidocVisitor.cs b/src/CodeGeneration/Nest.Litterateur/AsciiDoc/GeneratedAsciidocVisitor.cs new file mode 100644 index 00000000000..d806b913773 --- /dev/null +++ b/src/CodeGeneration/Nest.Litterateur/AsciiDoc/GeneratedAsciidocVisitor.cs @@ -0,0 +1,302 @@ +#if !DOTNETCORE +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Text.RegularExpressions; +using System.Threading.Tasks; +using AsciiDocNet; + +namespace Nest.Litterateur.AsciiDoc +{ + /// + /// Visits the "raw" asciidoc generated using Roslyn and adds attribute entries, + /// section titles, rearranges sections, etc. 
+ /// + public class GeneratedAsciidocVisitor : NoopVisitor + { + private static readonly Dictionary Ids = new Dictionary(); + + private readonly FileInfo _destination; + private Document _newDocument; + private bool _topLevel = true; + + public GeneratedAsciidocVisitor(FileInfo destination) + { + _destination = destination; + } + + public Document Convert(Document document) + { + document.Accept(this); + return _newDocument; + } + + public override void Visit(Document document) + { + _newDocument = new Document + { + Title = document.Title, + DocType = document.DocType + }; + + foreach (var authorInfo in document.Authors) + { + _newDocument.Authors.Add(authorInfo); + } + + RemoveDocDirectoryAttribute(_newDocument); + RemoveDocDirectoryAttribute(document); + + foreach (var attributeEntry in document.Attributes) + { + _newDocument.Attributes.Add(attributeEntry); + } + + if (!document.Attributes.Any(a => a.Name == "ref_current")) + { + _newDocument.Attributes.Add(new AttributeEntry("ref_current", "https://www.elastic.co/guide/en/elasticsearch/reference/current")); + } + + if (!document.Attributes.Any(a => a.Name == "github")) + { + _newDocument.Attributes.Add(new AttributeEntry("github", "https://github.com/elastic/elasticsearch-net")); + } + + if (!document.Attributes.Any(a => a.Name == "nuget")) + { + _newDocument.Attributes.Add(new AttributeEntry("nuget", "https://www.nuget.org/packages")); + } + + // see if the document has some kind of top level title and add one with an anchor if not. 
+ if (document.Title == null && document.Count > 0) + { + var sectionTitle = document[0] as SectionTitle; + + if (sectionTitle == null || sectionTitle.Level != 2) + { + var id = Path.GetFileNameWithoutExtension(_destination.Name); + var title = id.LowercaseHyphenToPascal(); + sectionTitle = new SectionTitle(title, 2); + sectionTitle.Attributes.Add(new Anchor(id)); + + _newDocument.Add(sectionTitle); + } + } + + base.Visit(document); + } + + public override void Visit(Container elements) + { + if (_topLevel) + { + _topLevel = false; + Source exampleJson = null; + Source objectInitializerExample = null; + + for (int index = 0; index < elements.Count; index++) + { + var element = elements[index]; + var source = element as Source; + + if (source != null) + { + // remove empty source blocks + if (string.IsNullOrWhiteSpace(source.Text)) + { + continue; + } + + var method = source.Attributes.OfType().FirstOrDefault(a => a.Name == "method"); + if (method == null) + { + _newDocument.Add(element); + continue; + } + + if ((method.Value == "expectjson" || method.Value == "queryjson") && + source.Attributes.Count > 1 && + source.Attributes[1].Name == "javascript" && + _destination.Name != "writing-aggregations.asciidoc") + { + exampleJson = source; + continue; + } + + // if there is a section title since the last source block, don't add one + var lastSourceBlock = _newDocument.LastOrDefault(e => e is Source); + var lastSectionTitle = _newDocument.OfType().LastOrDefault(e => e.Level == 3); + if (lastSourceBlock != null && lastSectionTitle != null) + { + var lastSectionTitleIndex = _newDocument.IndexOf(lastSectionTitle); + var lastSourceBlockIndex = _newDocument.IndexOf(lastSourceBlock); + if (lastSectionTitleIndex > lastSourceBlockIndex) + { + _newDocument.Add(element); + continue; + } + } + + switch (method.Value) + { + case "fluent": + case "queryfluent": + if (!LastSectionTitleMatches(text => text.StartsWith("Fluent DSL", StringComparison.OrdinalIgnoreCase))) + { + 
_newDocument.Add(new SectionTitle("Fluent DSL Example", 3)); + } + + _newDocument.Add(element); + + if (objectInitializerExample != null) + { + _newDocument.Add(new SectionTitle("Object Initializer Syntax Example", 3)); + _newDocument.Add(objectInitializerExample); + objectInitializerExample = null; + + if (exampleJson != null) + { + _newDocument.Add(exampleJson); + exampleJson = null; + } + } + break; + case "initializer": + _newDocument.Add(new SectionTitle("Object Initializer Syntax Example", 3)); + _newDocument.Add(element); + // Move the example json to after the initializer example + if (exampleJson != null) + { + _newDocument.Add(exampleJson); + exampleJson = null; + } + break; + case "queryinitializer": + if (objectInitializerExample != null) + { + _newDocument.Add(new SectionTitle("Object Initializer Syntax Example", 3)); + _newDocument.Add(objectInitializerExample); + + // Move the example json to after the initializer example + if (exampleJson != null) + { + _newDocument.Add(exampleJson); + exampleJson = null; + } + } + else + { + objectInitializerExample = source; + } + break; + case "expectresponse": + // Don't add the Handlng Response section title if it was the last title (it might be defined in the doc already) + if (!LastSectionTitleMatches(text => text.Equals("Handling Responses", StringComparison.OrdinalIgnoreCase))) + { + _newDocument.Add(new SectionTitle("Handling Responses", 3)); + } + + _newDocument.Add(element); + break; + default: + _newDocument.Add(element); + break; + } + } + else + { + _newDocument.Add(element); + } + } + } + + base.Visit(elements); + } + + public override void Visit(Source source) + { + if (source.Attributes.Count > 1 && + source.Attributes[1].Name == "javascript" && + !source.Attributes.HasTitle) + { + source.Attributes.Add(new Title("Example json output")); + } + + // remove method attributes as the elastic doc generation doesn't like them; it + // expects a linenumbering in the index 2 position of a source block + 
var methodAttribute = source.Attributes.FirstOrDefault(a => a.Name == "method"); + if (methodAttribute != null) + { + source.Attributes.Remove(methodAttribute); + } + + // Replace tabs with spaces and remove C# comment escaping from callouts + // (elastic docs generation does not like this callout format) + source.Text = Regex.Replace(source.Text.Replace("\t", " "), @"//[ \t]*\<(\d+)\>.*", "<$1>"); + + base.Visit(source); + } + + public override void Visit(SectionTitle sectionTitle) + { + if (sectionTitle.Level != 2) + { + base.Visit(sectionTitle); + return; + } + + // Generate an anchor for all Level 2 section titles + if (!sectionTitle.Attributes.HasAnchor) + { + var builder = new StringBuilder(); + using (var writer = new AsciiDocVisitor(new StringWriter(builder))) + { + writer.Visit(sectionTitle.Elements); + } + + var title = builder.ToString().PascalToHyphen(); + sectionTitle.Attributes.Add(new Anchor(title)); + } + + // Check for duplicate ids across documents + var key = sectionTitle.Attributes.Anchor.Id; + string existingFile; + if (Ids.TryGetValue(key, out existingFile)) + { + throw new Exception($"duplicate id {key} in {_destination.FullName}. 
Id already exists in {existingFile}"); + } + + Ids.Add(key, _destination.FullName); + base.Visit(sectionTitle); + } + + private void RemoveDocDirectoryAttribute(Document document) + { + var directoryAttribute = document.Attributes.FirstOrDefault(a => a.Name == "docdir"); + if (directoryAttribute != null) + { + document.Attributes.Remove(directoryAttribute); + } + } + + private bool LastSectionTitleMatches(Func predicate) + { + var lastSectionTitle = _newDocument.OfType().LastOrDefault(e => e.Level == 3); + if (lastSectionTitle != null && lastSectionTitle.Level == 3) + { + var builder = new StringBuilder(); + using (var visitor = new AsciiDocVisitor(new StringWriter(builder))) + { + visitor.Visit(lastSectionTitle.Elements); + } + + return predicate(builder.ToString()); + } + + return false; + } + } +} +#endif diff --git a/src/CodeGeneration/Nest.Litterateur/AsciiDoc/RawAsciidocVisitor.cs b/src/CodeGeneration/Nest.Litterateur/AsciiDoc/RawAsciidocVisitor.cs new file mode 100644 index 00000000000..e9a0a97989e --- /dev/null +++ b/src/CodeGeneration/Nest.Litterateur/AsciiDoc/RawAsciidocVisitor.cs @@ -0,0 +1,100 @@ +#if !DOTNETCORE +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using AsciiDocNet; + +namespace Nest.Litterateur.AsciiDoc +{ + /// + /// Visits raw asciidoc files (i.e. 
not generated) to make modifications + /// + public class RawAsciidocVisitor : NoopVisitor + { + private readonly FileInfo _destination; + + private static readonly Dictionary IncludeDirectories = new Dictionary + { + { "aggregations.asciidoc", "aggregations-usage.asciidoc" }, + { "query-dsl.asciidoc", "query-dsl-usage.asciidoc" }, + { "search.asciidoc", "search-usage.asciidoc" }, + }; + + public RawAsciidocVisitor(FileInfo destination) + { + _destination = destination; + } + + public override void Visit(Document document) + { + var directoryAttribute = document.Attributes.FirstOrDefault(a => a.Name == "docdir"); + if (directoryAttribute != null) + { + document.Attributes.Remove(directoryAttribute); + } + + // check if this document has generated includes to other files + var includeAttribute = document.Attributes.FirstOrDefault(a => a.Name == "includes-from-dirs"); + + if (includeAttribute != null) + { + var thisFileUri = new Uri(_destination.FullName); + var directories = includeAttribute.Value.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries); + + foreach (var directory in directories) + { + foreach (var file in Directory.EnumerateFiles(Path.Combine(Program.OutputDirPath, directory), "*.asciidoc", SearchOption.AllDirectories)) + { + var fileInfo = new FileInfo(file); + var referencedFileUri = new Uri(fileInfo.FullName); + var relativePath = thisFileUri.MakeRelativeUri(referencedFileUri); + var include = new Include(relativePath.OriginalString); + + document.Add(include); + } + } + } + + base.Visit(document); + } + + public override void Visit(Open open) + { + // include links to all the query dsl usage and aggregation usage pages on the landing query dsl and aggregations pages, respectively. 
+ string usageFilePath; + if (IncludeDirectories.TryGetValue(_destination.Name, out usageFilePath)) + { + var usageDoc = Document.Load(Path.Combine(Program.OutputDirPath, usageFilePath)); + + var includeAttribute = usageDoc.Attributes.FirstOrDefault(a => a.Name == "includes-from-dirs"); + + if (includeAttribute != null) + { + var directories = includeAttribute.Value.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries); + + var list = new UnorderedList(); + + foreach (var directory in directories) + { + foreach (var file in Directory.EnumerateFiles(Path.Combine(Program.OutputDirPath, directory), "*usage.asciidoc", SearchOption.AllDirectories)) + { + var fileInfo = new FileInfo(file); + var fileNameWithoutExtension = Path.GetFileNameWithoutExtension(fileInfo.Name); + + list.Items.Add(new UnorderedListItem + { + new Paragraph(new InternalAnchor(fileNameWithoutExtension, fileNameWithoutExtension.LowercaseHyphenToPascal())) + }); + } + } + + open.Add(list); + } + } + + base.Visit(open); + } + } +} +#endif diff --git a/src/CodeGeneration/Nest.Litterateur/Documentation/Blocks/CodeBlock.cs b/src/CodeGeneration/Nest.Litterateur/Documentation/Blocks/CodeBlock.cs index f305486bf12..c94d317fb4d 100644 --- a/src/CodeGeneration/Nest.Litterateur/Documentation/Blocks/CodeBlock.cs +++ b/src/CodeGeneration/Nest.Litterateur/Documentation/Blocks/CodeBlock.cs @@ -1,13 +1,44 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text.RegularExpressions; + namespace Nest.Litterateur.Documentation.Blocks { public class CodeBlock : IDocumentationBlock { - public string Value { get; } - public int LineNumber { get; } - public CodeBlock(string lineOfCode, int lineNumber) + public CodeBlock(string lineOfCode, int lineNumber, Language language, string propertyOrMethodName) { - Value = lineOfCode.Trim(); + Value = ExtractCallOutsFromText(lineOfCode); LineNumber = lineNumber; + Language = language; + PropertyName = propertyOrMethodName?.ToLowerInvariant(); 
+ } + + public List CallOuts { get; } = new List(); + + public Language Language { get; set; } + + public int LineNumber { get; } + + public string PropertyName { get; set; } + + public string Value { get; } + + private string ExtractCallOutsFromText(string lineOfCode) + { + var matches = Regex.Matches(lineOfCode, @"//[ \t]*(?\<\d+\>)[ \t]*(?\S.*)"); + foreach (Match match in matches) + { + CallOuts.Add($"{match.Groups["callout"].Value} {match.Groups["text"].Value}"); + } + + if (CallOuts.Any()) + { + lineOfCode = Regex.Replace(lineOfCode, @"//[ \t]*\<(\d+)\>.*", "//<$1>"); + } + + return lineOfCode.Trim(); } } } \ No newline at end of file diff --git a/src/CodeGeneration/Nest.Litterateur/Documentation/Blocks/CombinedBlock.cs b/src/CodeGeneration/Nest.Litterateur/Documentation/Blocks/CombinedBlock.cs index a40927f0fc2..ac880574611 100644 --- a/src/CodeGeneration/Nest.Litterateur/Documentation/Blocks/CombinedBlock.cs +++ b/src/CodeGeneration/Nest.Litterateur/Documentation/Blocks/CombinedBlock.cs @@ -4,7 +4,7 @@ namespace Nest.Litterateur.Documentation.Blocks { /// /// Used to keep a line of code (could be multiple e.g fluent syntax) and its annotations in one logical unit. 
- /// So they do not suffer from reoordering based on line number when writing out the documentation + /// So they do not suffer from reordering based on line number when writing out the documentation /// public class CombinedBlock : IDocumentationBlock { diff --git a/src/CodeGeneration/Nest.Litterateur/Documentation/Files/CSharpDocumentationFile.cs b/src/CodeGeneration/Nest.Litterateur/Documentation/Files/CSharpDocumentationFile.cs index 2905f4f8a89..cb30fa58045 100644 --- a/src/CodeGeneration/Nest.Litterateur/Documentation/Files/CSharpDocumentationFile.cs +++ b/src/CodeGeneration/Nest.Litterateur/Documentation/Files/CSharpDocumentationFile.cs @@ -1,4 +1,5 @@ -using System.Collections.Generic; +using System; +using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; @@ -6,49 +7,134 @@ using Nest.Litterateur.Documentation.Blocks; using Nest.Litterateur.Walkers; +#if !DOTNETCORE +using AsciiDocNet; +using Nest.Litterateur.AsciiDoc; +#endif + namespace Nest.Litterateur.Documentation.Files { public class CSharpDocumentationFile : DocumentationFile { - internal CSharpDocumentationFile(FileInfo fileLocation) : base(fileLocation) { } + internal CSharpDocumentationFile(FileInfo fileLocation) : base(fileLocation) + { + } - private string RenderBlocksToDocumentation(IEnumerable blocks, StringBuilder builder = null) + private string RenderBlocksToDocumentation(IEnumerable blocks) + { + var builder = new StringBuilder(); + var lastBlockWasCodeBlock = false; + var callouts = new List(); + Language? 
language = null; + string propertyOrMethodName = null; + + RenderBlocksToDocumentation(blocks, builder, ref lastBlockWasCodeBlock, ref callouts, ref language, ref propertyOrMethodName); + if (lastBlockWasCodeBlock) + { + builder.AppendLine("----"); + foreach (var callout in callouts) + { + builder.AppendLine(callout); + } + } + return builder.ToString(); + } + + private void RenderBlocksToDocumentation( + IEnumerable blocks, + StringBuilder builder, + ref bool lastBlockWasCodeBlock, + ref List callouts, + ref Language? language, + ref string propertyOrMethodName) { - var sb = builder ?? new StringBuilder(); foreach (var block in blocks) { if (block is TextBlock) { - sb.AppendLine(block.Value); + if (lastBlockWasCodeBlock) + { + lastBlockWasCodeBlock = false; + builder.AppendLine("----"); + if (callouts.Any()) + { + foreach (var callout in callouts) + { + builder.AppendLine(callout); + } + builder.AppendLine(); + callouts = new List(); + } + } + + builder.AppendLine(block.Value); } else if (block is CodeBlock) { - sb.AppendLine("[source, csharp]"); - sb.AppendLine("----"); - sb.AppendLine(block.Value); - sb.AppendLine("----"); + var codeBlock = (CodeBlock)block; + + // don't write different language code blocks in the same delimited source block + if (lastBlockWasCodeBlock && (codeBlock.Language != language || codeBlock.PropertyName != propertyOrMethodName)) + { + lastBlockWasCodeBlock = false; + builder.AppendLine("----"); + if (callouts.Any()) + { + foreach (var callout in callouts) + { + builder.AppendLine(callout); + } + builder.AppendLine(); + callouts = new List(); + } + } + + if (!lastBlockWasCodeBlock) + { + builder.AppendLine($"[source,{codeBlock.Language.ToString().ToLowerInvariant()},method=\"{codeBlock.PropertyName ?? 
"unknown"}\"]"); + builder.AppendLine("----"); + } + else + { + builder.AppendLine(); + } + + builder.AppendLine(codeBlock.Value); + + // add call outs here to write out when closing the block + callouts.AddRange(codeBlock.CallOuts); + lastBlockWasCodeBlock = true; + language = codeBlock.Language; + propertyOrMethodName = codeBlock.PropertyName; } else if (block is CombinedBlock) { - RenderBlocksToDocumentation(MergeAdjacentCodeBlocks(((CombinedBlock)block).Blocks), sb); + var mergedBlocks = MergeAdjacentCodeBlocks(((CombinedBlock)block).Blocks); + RenderBlocksToDocumentation(mergedBlocks, builder, ref lastBlockWasCodeBlock, ref callouts, ref language, ref propertyOrMethodName); } } - return sb.ToString(); } private List MergeAdjacentCodeBlocks(IEnumerable unmergedBlocks) { var blocks = new List(); List collapseCodeBlocks = null; + List collapseCallouts = null; int lineNumber = 0; + Language? language = null; + string propertyOrMethodName = null; + foreach (var b in unmergedBlocks) { - //if current block is not a code block and we;ve been collapsing code blocks - //at this point close that buffre and add a new codeblock + //if current block is not a code block and we've been collapsing code blocks + //at this point close that buffer and add a new codeblock if (!(b is CodeBlock) && collapseCodeBlocks != null) { - blocks.Add(new CodeBlock(string.Join("\r\n", collapseCodeBlocks), lineNumber)); + var block = new CodeBlock(string.Join(Environment.NewLine, collapseCodeBlocks), lineNumber, language.Value, propertyOrMethodName); + block.CallOuts.AddRange(collapseCallouts); + blocks.Add(block); collapseCodeBlocks = null; + collapseCallouts = null; } //if not a codeblock simply add it to the final list @@ -57,23 +143,42 @@ private List MergeAdjacentCodeBlocks(IEnumerable(); - collapseCodeBlocks.Add(b.Value); - lineNumber = b.LineNumber; + if (collapseCallouts == null) collapseCallouts = new List(); + + var codeBlock = (CodeBlock)b; + + if ((language != null && 
codeBlock.Language != language) || + (propertyOrMethodName != null && codeBlock.PropertyName != propertyOrMethodName)) + { + blocks.Add(codeBlock); + continue; + } + + language = codeBlock.Language; + propertyOrMethodName = codeBlock.PropertyName; + collapseCodeBlocks.Add(codeBlock.Value); + collapseCallouts.AddRange(codeBlock.CallOuts); + + lineNumber = codeBlock.LineNumber; } //make sure we flush our code buffer if (collapseCodeBlocks != null) - blocks.Add(new CodeBlock(string.Join("\r\n", collapseCodeBlocks), lineNumber)); + { + var joinedCodeBlock = new CodeBlock(string.Join(Environment.NewLine, collapseCodeBlocks), lineNumber, language.Value, propertyOrMethodName); + joinedCodeBlock.CallOuts.AddRange(collapseCallouts); + blocks.Add(joinedCodeBlock); + } return blocks; } public override void SaveToDocumentationFolder() { var code = File.ReadAllText(this.FileLocation.FullName); - var ast = CSharpSyntaxTree.ParseText(code); + var walker = new DocumentationFileWalker(); walker.Visit(ast.GetRoot()); var blocks = walker.Blocks.OrderBy(b => b.LineNumber).ToList(); @@ -81,9 +186,31 @@ public override void SaveToDocumentationFolder() var mergedBlocks = MergeAdjacentCodeBlocks(blocks); var body = this.RenderBlocksToDocumentation(mergedBlocks); + var docFile = this.CreateDocumentationLocation(); + +#if !DOTNETCORE + CleanDocumentAndWriteToFile(body, docFile); +#else + File.WriteAllText(docFile.FullName, body); +#endif + } + +#if !DOTNETCORE + private void CleanDocumentAndWriteToFile(string body, FileInfo docFile) + { + // tidy up the asciidoc + var document = Document.Parse(body); - var docFileName = this.CreateDocumentationLocation(); - File.WriteAllText(docFileName.FullName, body); + var visitor = new GeneratedAsciidocVisitor(docFile); + document = visitor.Convert(document); + + // add attributes and write to destination + using (var file = new StreamWriter(docFile.FullName)) + { + + document.Accept(new AsciiDocVisitor(file)); + } } +#endif } } diff --git 
a/src/CodeGeneration/Nest.Litterateur/Documentation/Files/DocumentationFile.cs b/src/CodeGeneration/Nest.Litterateur/Documentation/Files/DocumentationFile.cs index 4d8faf0aa90..381cbf949db 100644 --- a/src/CodeGeneration/Nest.Litterateur/Documentation/Files/DocumentationFile.cs +++ b/src/CodeGeneration/Nest.Litterateur/Documentation/Files/DocumentationFile.cs @@ -1,6 +1,9 @@ using System; +using System.Collections.Generic; using System.IO; +using System.Reflection.Emit; using System.Text.RegularExpressions; +using Nest.Litterateur; namespace Nest.Litterateur.Documentation.Files { @@ -26,7 +29,9 @@ public static DocumentationFile Load(FileInfo fileLocation) return new CSharpDocumentationFile(fileLocation); case ".gif": case ".jpg": + case ".jpeg": case ".png": + return new ImageDocumentationFile(fileLocation); case ".asciidoc": return new RawDocumentationFile(fileLocation); } @@ -37,13 +42,19 @@ public static DocumentationFile Load(FileInfo fileLocation) protected virtual FileInfo CreateDocumentationLocation() { var testFullPath = this.FileLocation.FullName; - var testInDocumenationFolder = Regex.Replace(testFullPath, @"(^.+\\Tests\\|\" + this.Extension + "$)", "") + ".asciidoc"; - var documenationTargetPath = Path.GetFullPath(Path.Combine(Program.OutputFolder, testInDocumenationFolder)); - var fileInfo = new FileInfo(documenationTargetPath); + var testInDocumentationFolder = + Regex.Replace(testFullPath, @"(^.+\\Tests\\|\" + this.Extension + "$)", "") + .TrimEnd(".doc") + .TrimEnd("Tests") + .PascalToHyphen() + ".asciidoc"; + + var documentationTargetPath = Path.GetFullPath(Path.Combine(Program.OutputDirPath, testInDocumentationFolder)); + var fileInfo = new FileInfo(documentationTargetPath); if (fileInfo.Directory != null) Directory.CreateDirectory(fileInfo.Directory.FullName); + return fileInfo; - } + } } } \ No newline at end of file diff --git a/src/CodeGeneration/Nest.Litterateur/Documentation/Files/ImageDocumentationFile.cs 
b/src/CodeGeneration/Nest.Litterateur/Documentation/Files/ImageDocumentationFile.cs new file mode 100644 index 00000000000..ee958349255 --- /dev/null +++ b/src/CodeGeneration/Nest.Litterateur/Documentation/Files/ImageDocumentationFile.cs @@ -0,0 +1,36 @@ +using System.IO; +using System.Text.RegularExpressions; + +namespace Nest.Litterateur.Documentation.Files +{ + public class ImageDocumentationFile : DocumentationFile + { + public ImageDocumentationFile(FileInfo fileLocation) : base(fileLocation) { } + + public override void SaveToDocumentationFolder() + { + var docFileName = this.CreateDocumentationLocation(); + + // copy for asciidoc to work (path is relative to file) + this.FileLocation.CopyTo(docFileName.FullName, true); + + // copy to the root as well, for the doc generation process (path is relative to root) + this.FileLocation.CopyTo(Path.Combine(Program.OutputDirPath, docFileName.Name), true); + } + + protected override FileInfo CreateDocumentationLocation() + { + var testFullPath = this.FileLocation.FullName; + + var testInDocumenationFolder = Regex.Replace(testFullPath, @"(^.+\\Tests\\|\" + this.Extension + "$)", "") + .PascalToHyphen() + this.Extension; + + var documentationTargetPath = Path.GetFullPath(Path.Combine(Program.OutputDirPath, testInDocumenationFolder)); + + var fileInfo = new FileInfo(documentationTargetPath); + if (fileInfo.Directory != null) + Directory.CreateDirectory(fileInfo.Directory.FullName); + return fileInfo; + } + } +} diff --git a/src/CodeGeneration/Nest.Litterateur/Documentation/Files/RawDocumentationFile.cs b/src/CodeGeneration/Nest.Litterateur/Documentation/Files/RawDocumentationFile.cs index eb1a4d01550..26461e3436b 100644 --- a/src/CodeGeneration/Nest.Litterateur/Documentation/Files/RawDocumentationFile.cs +++ b/src/CodeGeneration/Nest.Litterateur/Documentation/Files/RawDocumentationFile.cs @@ -1,5 +1,11 @@ +using System; using System.IO; +using System.Linq; using System.Text.RegularExpressions; +#if !DOTNETCORE +using 
AsciiDocNet; +using Nest.Litterateur.AsciiDoc; +#endif namespace Nest.Litterateur.Documentation.Files { @@ -11,19 +17,34 @@ public override void SaveToDocumentationFolder() { //we simply do a copy of the markdown file var docFileName = this.CreateDocumentationLocation(); + +#if !DOTNETCORE + var document = Document.Load(FileLocation.FullName); + + // make any modifications + var rawVisitor = new RawAsciidocVisitor(FileLocation); + document.Accept(rawVisitor); + + // write out asciidoc to file + using (var visitor = new AsciiDocVisitor(docFileName.FullName)) + { + document.Accept(visitor); + } +#else this.FileLocation.CopyTo(docFileName.FullName, true); +#endif } protected override FileInfo CreateDocumentationLocation() { var testFullPath = this.FileLocation.FullName; - var testInDocumenationFolder = Regex.Replace(testFullPath, @"(^.+\\Tests\\|\" + this.Extension + "$)", "") + this.Extension; + var testInDocumenationFolder = Regex.Replace(testFullPath, @"(^.+\\Tests\\|\" + this.Extension + "$)", "").PascalToHyphen() + this.Extension; - var documenationTargetPath = Path.GetFullPath(Path.Combine(Program.OutputFolder, testInDocumenationFolder)); + var documenationTargetPath = Path.GetFullPath(Path.Combine(Program.OutputDirPath, testInDocumenationFolder)); var fileInfo = new FileInfo(documenationTargetPath); if (fileInfo.Directory != null) Directory.CreateDirectory(fileInfo.Directory.FullName); return fileInfo; } } -} \ No newline at end of file +} diff --git a/src/CodeGeneration/Nest.Litterateur/EnumerableExtensions.cs b/src/CodeGeneration/Nest.Litterateur/EnumerableExtensions.cs index 7f67ed50c2f..9c82fffdb04 100644 --- a/src/CodeGeneration/Nest.Litterateur/EnumerableExtensions.cs +++ b/src/CodeGeneration/Nest.Litterateur/EnumerableExtensions.cs @@ -1,5 +1,4 @@ -using System; -using System.Collections.Generic; +using System.Collections.Generic; using System.Linq; namespace Nest.Litterateur diff --git a/src/CodeGeneration/Nest.Litterateur/Language.cs 
b/src/CodeGeneration/Nest.Litterateur/Language.cs new file mode 100644 index 00000000000..07dea31177f --- /dev/null +++ b/src/CodeGeneration/Nest.Litterateur/Language.cs @@ -0,0 +1,8 @@ +namespace Nest.Litterateur +{ + public enum Language + { + CSharp, + JavaScript + } +} \ No newline at end of file diff --git a/src/CodeGeneration/Nest.Litterateur/Linker/Linker.cs b/src/CodeGeneration/Nest.Litterateur/Linker/Linker.cs deleted file mode 100644 index 80578ed3a1b..00000000000 --- a/src/CodeGeneration/Nest.Litterateur/Linker/Linker.cs +++ /dev/null @@ -1,16 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; - -namespace Nest.Litterateur.Linker -{ - /// - /// Goes over the generated docs, does heuristical touchups and writes outs ascii docs links at the bottom of files - /// - public class Linker - { - - } -} diff --git a/src/CodeGeneration/Nest.Litterateur/LitUp.cs b/src/CodeGeneration/Nest.Litterateur/LitUp.cs index d4256b43960..17d7588dd59 100644 --- a/src/CodeGeneration/Nest.Litterateur/LitUp.cs +++ b/src/CodeGeneration/Nest.Litterateur/LitUp.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using System.Diagnostics; using System.IO; using System.Linq; using Nest.Litterateur.Documentation.Files; @@ -9,8 +10,9 @@ namespace Nest.Litterateur public static class LitUp { private static readonly string[] SkipFolders = { "Nest.Tests.Literate", "Debug", "Release" }; - public static IEnumerable InputFiles(string extension) => - from f in Directory.GetFiles(Program.InputFolder, $"*.{extension}", SearchOption.AllDirectories) + + public static IEnumerable InputFiles(string path) => + from f in Directory.GetFiles(Program.InputDirPath, $"{path}", SearchOption.AllDirectories) let dir = new DirectoryInfo(f) where dir?.Parent != null && !SkipFolders.Contains(dir.Parent.Name) select DocumentationFile.Load(new FileInfo(f)); @@ -19,17 +21,29 @@ public static IEnumerable> Input { get { - 
yield return InputFiles("doc.cs"); - yield return InputFiles("asciidoc"); - yield return InputFiles("ping"); - yield return InputFiles("gif"); + yield return InputFiles("*.doc.cs"); + yield return InputFiles("*UsageTests.cs"); + yield return InputFiles("*.png"); + yield return InputFiles("*.gif"); + yield return InputFiles("*.jpg"); + // process asciidocs last as they may have generated + // includes to other output asciidocs + yield return InputFiles("*.asciidoc"); } } public static void Go(string[] args) { - foreach (var file in Input.SelectMany(s=>s)) + foreach (var file in Input.SelectMany(s => s)) + { file.SaveToDocumentationFolder(); + } + +#if !DOTNETCORE + if (Debugger.IsAttached) + Console.WriteLine("Press any key to continue..."); + Console.ReadKey(); +#endif } } } \ No newline at end of file diff --git a/src/CodeGeneration/Nest.Litterateur/Nest.Litterateur.csproj b/src/CodeGeneration/Nest.Litterateur/Nest.Litterateur.csproj index 07c7e16e3fb..81b95ef4d94 100644 --- a/src/CodeGeneration/Nest.Litterateur/Nest.Litterateur.csproj +++ b/src/CodeGeneration/Nest.Litterateur/Nest.Litterateur.csproj @@ -39,18 +39,23 @@ + + + - + + + @@ -65,6 +70,17 @@ --> + + + + + ..\..\..\packages\AsciiDocNet\lib\net45\AsciiDocNet.dll + True + True + + + + True @@ -113,6 +129,62 @@ + + + + + ..\..\..\packages\Newtonsoft.Json\lib\net35\Newtonsoft.Json.dll + True + True + + + + + + + ..\..\..\packages\Newtonsoft.Json\lib\net20\Newtonsoft.Json.dll + True + True + + + + + + + ..\..\..\packages\Newtonsoft.Json\lib\net40\Newtonsoft.Json.dll + True + True + + + + + + + ..\..\..\packages\Newtonsoft.Json\lib\net45\Newtonsoft.Json.dll + True + True + + + + + + + ..\..\..\packages\Newtonsoft.Json\lib\portable-net45+wp80+win8+wpa81+dnxcore50\Newtonsoft.Json.dll + True + True + + + + + + + ..\..\..\packages\Newtonsoft.Json\lib\portable-net40+sl5+wp80+win8+wpa81\Newtonsoft.Json.dll + True + True + + + + @@ -135,4 +207,4 @@ - + \ No newline at end of file diff --git 
a/src/CodeGeneration/Nest.Litterateur/Program.cs b/src/CodeGeneration/Nest.Litterateur/Program.cs index 8ecdf2ab64d..5e42aa21701 100644 --- a/src/CodeGeneration/Nest.Litterateur/Program.cs +++ b/src/CodeGeneration/Nest.Litterateur/Program.cs @@ -1,32 +1,27 @@ using System.IO; -using Nest.Litterateur.Documentation; namespace Nest.Litterateur { public static class Program { - private static string DefaultTestFolder; - private static string DefaultDocFolder; - static Program() { var currentDirectory = new DirectoryInfo(Directory.GetCurrentDirectory()); if (currentDirectory.Name == "Nest.Litterateur" && currentDirectory.Parent.Name == "CodeGeneration") { - DefaultTestFolder = @"..\..\Tests"; - DefaultDocFolder = @"..\..\..\docs\asciidoc"; + InputDirPath = @"..\..\Tests"; + OutputDirPath = @"..\..\..\docs\asciidoc"; } else { - DefaultTestFolder = @"..\..\..\..\..\src\Tests"; - DefaultDocFolder = @"..\..\..\..\..\docs\asciidoc"; + InputDirPath = @"..\..\..\..\..\src\Tests"; + OutputDirPath = @"..\..\..\..\..\docs\asciidoc"; } } - public static string InputFolder => DefaultTestFolder; + public static string InputDirPath { get; } - - public static string OutputFolder => DefaultDocFolder; + public static string OutputDirPath { get; } static void Main(string[] args) => LitUp.Go(args); } diff --git a/src/CodeGeneration/Nest.Litterateur/StringExtensions.cs b/src/CodeGeneration/Nest.Litterateur/StringExtensions.cs new file mode 100644 index 00000000000..3dc5bc265af --- /dev/null +++ b/src/CodeGeneration/Nest.Litterateur/StringExtensions.cs @@ -0,0 +1,220 @@ +using System; +using System.Collections.Generic; +using System.ComponentModel; +using System.IO; +using System.Linq; +using System.Reflection; +using System.Text; +using System.Text.RegularExpressions; +using Microsoft.CodeAnalysis; +using Microsoft.CodeAnalysis.CSharp; +using Newtonsoft.Json; + +namespace Nest.Litterateur +{ + public static class StringExtensions + { + public static string PascalToHyphen(this string input) 
+ { + if (string.IsNullOrEmpty(input)) return string.Empty; + + return Regex.Replace( + Regex.Replace( + Regex.Replace(input, @"([A-Z]+)([A-Z][a-z])", "$1-$2"), @"([a-z\d])([A-Z])", "$1-$2") + , @"[-\s]+", "-").TrimEnd('-').ToLower(); + } + + public static string LowercaseHyphenToPascal(this string lowercaseHyphenatedInput) + { + return Regex.Replace(lowercaseHyphenatedInput.Replace("-", " "), @"\b([a-z])", m => m.Captures[0].Value.ToUpper()); + } + + public static string TrimEnd(this string input, string trim) + { + if (string.IsNullOrEmpty(input)) return string.Empty; + + return input.EndsWith(trim, StringComparison.OrdinalIgnoreCase) + ? input.Substring(0, input.Length - trim.Length) + : input; + } + + public static string RemoveLeadingAndTrailingMultiLineComments(this string input) + { + var match = Regex.Match(input, @"^(?[ \t]*\/\*)"); + + if (match.Success) + { + input = input.Substring(match.Groups["value"].Value.Length); + } + + match = Regex.Match(input, @"(?\*\/[ \t]*)$"); + + if (match.Success) + { + input = input.Substring(0, input.Length - match.Groups["value"].Value.Length); + } + + return input; + } + + public static string RemoveLeadingSpacesAndAsterisk(this string input) + { + var match = Regex.Match(input, @"^(?[ \t]*\*\s?).*"); + + if (match.Success) + { + input = input.Substring(match.Groups["value"].Value.Length); + } + + return input; + } + + public static string RemoveNumberOfLeadingTabsAfterNewline(this string input, int numberOfTabs) + { + var firstTab = input.IndexOf("\t", StringComparison.OrdinalIgnoreCase); + + if (firstTab == -1) + { + return input; + } + int count = 0; + char firstNonTabCharacter = Char.MinValue; + + for (int i = firstTab; i < input.Length; i++) + { + if (input[i] != '\t') + { + firstNonTabCharacter = input[i]; + count = i - firstTab; + break; + } + } + + if (firstNonTabCharacter == '{' && numberOfTabs != count) + { + numberOfTabs = count; + } + + return Regex.Replace( + Regex.Replace( + input, + 
$"(?[\n|\r\n]+\t{{{numberOfTabs}}})", + m => m.Value.Replace("\t", string.Empty) + ), + $"(?[\n|\r\n]+\\s{{{numberOfTabs * 4}}})", + m => m.Value.Replace(" ", string.Empty) + ); + } + + public static string[] SplitOnNewLines(this string input, StringSplitOptions options) + { + return input.Split(new[] { "\r\n", "\n" }, options); + } + +#if !DOTNETCORE + // TODO: Hack of replacements in anonymous types that represent json. This can be resolved by referencing tests assembly when building the dynamic assembly, + // but might want to put doc generation at same directory level as Tests to reference project directly. + private static Dictionary Substitutions = new Dictionary + { + { "FixedDate", "new DateTime(2015, 06, 06, 12, 01, 02, 123)" }, + { "FirstNameToFind", "\"pierce\"" }, + { "Project.Projects.First().Suggest.Context.Values.SelectMany(v => v).First()", "\"red\"" }, + { "Project.Projects.First().Suggest.Contexts.Values.SelectMany(v => v).First()", "\"red\"" }, + { "Project.Instance.Name", "\"Durgan LLC\"" }, + { "Project.InstanceAnonymous", "new {name = \"Koch, Collier and Mohr\", state = \"BellyUp\",startedOn = " + + "\"2015-01-01T00:00:00\",lastActivity = \"0001-01-01T00:00:00\",leadDeveloper = " + + "new { gender = \"Male\", id = 0, firstName = \"Martijn\", lastName = \"Laarman\" }," + + "location = new { lat = 42.1523, lon = -80.321 }}" }, + { "_templateString", "\"{ \\\"match\\\": { \\\"text\\\": \\\"{{query_string}}\\\" } }\"" }, + { "base.QueryJson", "new{ @bool = new { must = new[] { new { match_all = new { } } }, must_not = new[] { new { match_all = new { } } }, should = new[] { new { match_all = new { } } }, filter = new[] { new { match_all = new { } } }, minimum_should_match = 1, boost = 2.0, } }" }, + { "ExpectedTerms", "new [] { \"term1\", \"term2\" }" }, + { "_ctxNumberofCommits", "\"_source.numberOfCommits > 0\"" } + }; + + public static bool TryGetJsonForAnonymousType(this string anonymousTypeString, out string json) + { + json = null; + + 
foreach (var substitution in Substitutions) + { + anonymousTypeString = anonymousTypeString.Replace(substitution.Key, substitution.Value); + } + + var text = + $@" + using System; + using System.Collections.Generic; + using System.ComponentModel; + using Newtonsoft.Json; + using Newtonsoft.Json.Linq; + + namespace Temporary + {{ + public class Json + {{ + public string Write() + {{ + var o = {anonymousTypeString}; + var json = JsonConvert.SerializeObject(o, Formatting.Indented); + return json; + }} + }} + }}"; + + var syntaxTree = CSharpSyntaxTree.ParseText(text); + var assemblyName = Path.GetRandomFileName(); + var references = new MetadataReference[] + { + MetadataReference.CreateFromFile(typeof(object).GetTypeInfo().Assembly.Location), + MetadataReference.CreateFromFile(typeof(Enumerable).GetTypeInfo().Assembly.Location), + MetadataReference.CreateFromFile(typeof(JsonConvert).GetTypeInfo().Assembly.Location), + MetadataReference.CreateFromFile(typeof(ITypedList).GetTypeInfo().Assembly.Location), + }; + + var compilation = + CSharpCompilation.Create( + assemblyName, + new[] { syntaxTree }, + references, + new CSharpCompilationOptions(OutputKind.DynamicallyLinkedLibrary)); + + using (var ms = new MemoryStream()) + { + var result = compilation.Emit(ms); + + if (!result.Success) + { + var failures = result.Diagnostics.Where(diagnostic => + diagnostic.IsWarningAsError || + diagnostic.Severity == DiagnosticSeverity.Error); + + var builder = new StringBuilder($"Unable to serialize: {anonymousTypeString}"); + foreach (var diagnostic in failures) + { + builder.AppendLine($"{diagnostic.Id}: {diagnostic.GetMessage()}"); + } + builder.AppendLine(new string('-', 30)); + + Console.Error.WriteLine(builder.ToString()); + return false; + } + + ms.Seek(0, SeekOrigin.Begin); + + var assembly = Assembly.Load(ms.ToArray()); + var type = assembly.GetType("Temporary.Json"); + var obj = Activator.CreateInstance(type); + + var output = type.InvokeMember("Write", + BindingFlags.Default | 
BindingFlags.InvokeMethod, + null, + obj, + new object[] { }); + + json = output.ToString(); + return true; + } + } +#endif + } +} diff --git a/src/CodeGeneration/Nest.Litterateur/SyntaxNodeExtensions.cs b/src/CodeGeneration/Nest.Litterateur/SyntaxNodeExtensions.cs new file mode 100644 index 00000000000..640542083ec --- /dev/null +++ b/src/CodeGeneration/Nest.Litterateur/SyntaxNodeExtensions.cs @@ -0,0 +1,13 @@ +using System.Text.RegularExpressions; +using Microsoft.CodeAnalysis; + +namespace Nest.Litterateur +{ + public static class SyntaxNodeExtensions + { + public static bool ShouldBeHidden(this SyntaxNode node) + { + return node.HasLeadingTrivia && Regex.IsMatch(node.GetLeadingTrivia().ToFullString(), @"\/\/\s*hide"); + } + } +} \ No newline at end of file diff --git a/src/CodeGeneration/Nest.Litterateur/Walkers/CodeWithDocumentationWalker.cs b/src/CodeGeneration/Nest.Litterateur/Walkers/CodeWithDocumentationWalker.cs index 2af708577a0..cb8796ca9bd 100644 --- a/src/CodeGeneration/Nest.Litterateur/Walkers/CodeWithDocumentationWalker.cs +++ b/src/CodeGeneration/Nest.Litterateur/Walkers/CodeWithDocumentationWalker.cs @@ -4,7 +4,9 @@ using Nest.Litterateur.Documentation; using System; using System.Collections.Generic; +using System.IO; using System.Linq; +using System.Reflection; using System.Text; using System.Text.RegularExpressions; using Nest.Litterateur.Documentation.Blocks; @@ -13,24 +15,30 @@ namespace Nest.Litterateur.Walkers { class CodeWithDocumentationWalker : CSharpSyntaxWalker { - public List Blocks { get; } = new List(); - public List TextBlocks { get; } = new List(); - private bool _firstVisit = true; private string _code; + private readonly string _propertyOrMethodName; + public int ClassDepth { get; } + + public List Blocks { get; } = new List(); + + public List TextBlocks { get; } = new List(); + private readonly int? 
_lineNumberOverride; /// - /// We want to support inlining /** */ documentations because its super handy + /// We want to support inlining /** */ documentations because its super handy /// to document fluent code, what ensues is total hackery /// - /// + /// the depth of the class /// line number used for sorting - public CodeWithDocumentationWalker(int classDepth = 1, int? lineNumber = null) : base(SyntaxWalkerDepth.StructuredTrivia) + /// the name of the property that we are walking + public CodeWithDocumentationWalker(int classDepth = 1, int? lineNumber = null, string propertyOrMethodName = null) : base(SyntaxWalkerDepth.StructuredTrivia) { ClassDepth = classDepth; _lineNumberOverride = lineNumber; + _propertyOrMethodName = propertyOrMethodName; } public override void Visit(SyntaxNode node) @@ -40,25 +48,41 @@ public override void Visit(SyntaxNode node) _firstVisit = false; var repeatedTabs = 2 + ClassDepth; + var language = Language.CSharp; + + + _code = node.WithoutLeadingTrivia().WithTrailingTrivia().ToFullString(); + _code = _code.RemoveNumberOfLeadingTabsAfterNewline(repeatedTabs); - // find x or more repeated tabs and trim x number of tabs from the start - _code = Regex.Replace(_code, $"\t{{{repeatedTabs},}}", match => match.Value.Substring(repeatedTabs)); +#if !DOTNETCORE + if (_propertyOrMethodName == "ExpectJson" || _propertyOrMethodName == "QueryJson") + { + // try to get the json for the anonymous type. + // Only supports system types and Json.Net LINQ objects e.g. JObject + string json; + if (_code.TryGetJsonForAnonymousType(out json)) + { + language = Language.JavaScript; + _code = json; + } + } +#endif + // TODO: Can do this once we get the generic arguments from the Property declaration + //if (_propertyName == "Fluent") + //{ + // // need to know what type we're operating on + // _code += $"client.Search({_code});"; + //} var nodeLine = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; - var line = _lineNumberOverride ?? 
nodeLine; - - var codeBlocks = Regex.Split(_code, @"\/\*\*.*?\*\/", RegexOptions.Singleline) - .Select(b => b.TrimStart('\r', '\n').TrimEnd('\r', '\n', '\t')) - .Where(b => !string.IsNullOrEmpty(b) && b != ";") - .Select(b=>new CodeBlock(b, line)) - .ToList(); + var codeBlocks = ParseCodeBlocks(_code, line, language, _propertyOrMethodName); base.Visit(node); - var nodeHasLeadingTriva = node.HasLeadingTrivia && node.GetLeadingTrivia() - .Any(c=>c.Kind() == SyntaxKind.MultiLineDocumentationCommentTrivia); + var nodeHasLeadingTriva = node.HasLeadingTrivia && + node.GetLeadingTrivia().Any(c => c.Kind() == SyntaxKind.MultiLineDocumentationCommentTrivia); var blocks = codeBlocks.Intertwine(this.TextBlocks, swap: nodeHasLeadingTriva); this.Blocks.Add(new CombinedBlock(blocks, line)); return; @@ -74,20 +98,15 @@ public override void VisitBlock(BlockSyntax node) _firstVisit = false; foreach (var statement in node.Statements) { - var leadingTabs = new string('\t', 3 + ClassDepth); + var repeatedTabs = 3 + ClassDepth; SyntaxNode formattedStatement = statement; - _code = formattedStatement.WithoutLeadingTrivia().WithTrailingTrivia().ToFullString().Replace(leadingTabs, string.Empty); + _code = formattedStatement.WithoutLeadingTrivia().WithTrailingTrivia().ToFullString(); + _code = _code.RemoveNumberOfLeadingTabsAfterNewline(repeatedTabs); var nodeLine = formattedStatement.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; - var line = _lineNumberOverride ?? 
nodeLine; - - var codeBlocks = Regex.Split(_code, @"\/\*\*.*?\*\/", RegexOptions.Singleline) - .Select(b => b.TrimStart('\r', '\n').TrimEnd('\r', '\n', '\t')) - .Where(b => !string.IsNullOrEmpty(b) && b != ";") - .Select(b => new CodeBlock(b, line)) - .ToList(); + var codeBlocks = ParseCodeBlocks(_code, line, Language.CSharp, _propertyOrMethodName); this.Blocks.AddRange(codeBlocks); } @@ -96,19 +115,41 @@ public override void VisitBlock(BlockSyntax node) } } - public override void VisitXmlText(XmlTextSyntax node) + public override void VisitTrivia(SyntaxTrivia trivia) { - var nodeLine = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; - var line = _lineNumberOverride ?? nodeLine; - var text = node.TextTokens - .Where(n => n.Kind() == SyntaxKind.XmlTextLiteralToken) - .Aggregate(new StringBuilder(), (a, t) => a.AppendLine(t.Text.TrimStart()), a => a.ToString()); + if (trivia.Kind() != SyntaxKind.MultiLineDocumentationCommentTrivia) + { + base.VisitTrivia(trivia); + return; + } - this.TextBlocks.Add(new TextBlock(text, line)); + var tokens = trivia.ToFullString() + .RemoveLeadingAndTrailingMultiLineComments() + .SplitOnNewLines(StringSplitOptions.None); + var builder = new StringBuilder(); - base.VisitXmlText(node); - } + foreach (var token in tokens) + { + var currentToken = token.RemoveLeadingSpacesAndAsterisk(); + var decodedToken = System.Net.WebUtility.HtmlDecode(currentToken); + builder.AppendLine(decodedToken); + } + var text = builder.ToString(); + var line = _firstVisit + ? 
trivia.SyntaxTree.GetLineSpan(trivia.Span).StartLinePosition.Line + : _lineNumberOverride.GetValueOrDefault(0); + this.Blocks.Add(new TextBlock(text, line)); + } + + private List ParseCodeBlocks(string code, int line, Language language, string propertyName) + { + return Regex.Split(code, @"\/\*\*.*?\*\/", RegexOptions.Singleline) + .Select(b => b.TrimStart('\r', '\n').TrimEnd('\r', '\n', '\t')) + .Where(b => !string.IsNullOrEmpty(b) && b != ";") + .Select(b => new CodeBlock(b, line, language, propertyName)) + .ToList(); + } } } diff --git a/src/CodeGeneration/Nest.Litterateur/Walkers/DocumentationFileWalker.cs b/src/CodeGeneration/Nest.Litterateur/Walkers/DocumentationFileWalker.cs index 6ed9d53a679..b60df47d710 100644 --- a/src/CodeGeneration/Nest.Litterateur/Walkers/DocumentationFileWalker.cs +++ b/src/CodeGeneration/Nest.Litterateur/Walkers/DocumentationFileWalker.cs @@ -1,7 +1,6 @@ using Microsoft.CodeAnalysis; using Microsoft.CodeAnalysis.CSharp; using Microsoft.CodeAnalysis.CSharp.Syntax; -using Nest.Litterateur.Documentation; using System; using System.Collections.Generic; using System.Linq; @@ -12,43 +11,90 @@ namespace Nest.Litterateur.Walkers { class DocumentationFileWalker : CSharpSyntaxWalker { + private static readonly string[] PropertyOrMethodNamesOfInterest = + { + "ExpectJson", + "QueryJson", + "Fluent", + "Initializer", + "QueryFluent", + "QueryInitializer" + }; + + private string _propertyOrMethodName; + public DocumentationFileWalker() : base(SyntaxWalkerDepth.StructuredTrivia) { } private int ClassDepth { get; set; } + public int InterfaceDepth { get; set; } private bool InsideMultiLineDocumentation { get; set; } private bool InsideAutoIncludeMethodBlock { get; set; } private bool InsideFluentOrInitializerExample { get; set; } + private bool IncludeMethodBlockContainsLambda { get; set; } + private int EndLine { get; set; } public List Blocks { get; } = new List(); + public override void VisitInterfaceDeclaration(InterfaceDeclarationSyntax node) 
+ { + if (node.ShouldBeHidden()) return; + + if (node.ChildNodes().All(childNode => childNode is PropertyDeclarationSyntax || childNode is AttributeListSyntax)) + { + // simple nested interface + var line = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; + var walker = new CodeWithDocumentationWalker(0, line); + walker.Visit(node); + this.Blocks.AddRange(walker.Blocks); + } + } + public override void VisitClassDeclaration(ClassDeclarationSyntax node) { + if (node.ShouldBeHidden()) return; + ++ClassDepth; if (ClassDepth == 1) { base.VisitClassDeclaration(node); - } - // are we dealing with a simple nested POCO? + } else if (node.ChildNodes().All(childNode => childNode is PropertyDeclarationSyntax || childNode is AttributeListSyntax)) - { + { + // simple nested POCO var line = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; var walker = new CodeWithDocumentationWalker(ClassDepth - 2, line); walker.Visit(node); this.Blocks.AddRange(walker.Blocks); } + else + { + var methods = node.ChildNodes().OfType(); + if (!methods.Any(m => m.AttributeLists.SelectMany(a => a.Attributes).Any())) + { + // nested class with methods that are not unit or integration tests e.g. 
example PropertyVisitor in Automap.doc.cs + var line = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; + var walker = new CodeWithDocumentationWalker(ClassDepth - 2, line); + walker.Visit(node); + this.Blocks.AddRange(walker.Blocks); + } + } --ClassDepth; } public override void VisitPropertyDeclaration(PropertyDeclarationSyntax node) { - var propertyName = node.Identifier.Text; - if (propertyName == "Fluent") - { - this.InsideFluentOrInitializerExample = true; - base.VisitPropertyDeclaration(node); - this.InsideFluentOrInitializerExample = false; - } - else if (propertyName == "Initializer") + if (node.ShouldBeHidden()) return; + + _propertyOrMethodName = node.Identifier.Text; + if (PropertyOrMethodNamesOfInterest.Contains(_propertyOrMethodName)) { + // TODO: Look to get the generic types for the call so that we can prettify the fluent and OIS calls in docs e.g. client.Search({Call}); + // var genericArguments = node.DescendantNodes().OfType().FirstOrDefault(); + // List arguments = new List(); + // if (genericArguments != null) + // { + // arguments.AddRange(genericArguments.TypeArgumentList.Arguments); + // } + this.InsideFluentOrInitializerExample = true; base.VisitPropertyDeclaration(node); this.InsideFluentOrInitializerExample = false; @@ -57,71 +103,116 @@ public override void VisitPropertyDeclaration(PropertyDeclarationSyntax node) public override void VisitArrowExpressionClause(ArrowExpressionClauseSyntax node) { - if (!this.InsideFluentOrInitializerExample) return; + if (node.ShouldBeHidden()) return; + + if (!this.InsideFluentOrInitializerExample && !PropertyOrMethodNamesOfInterest.Contains(_propertyOrMethodName)) return; var syntaxNode = node?.ChildNodes()?.LastOrDefault()?.WithAdditionalAnnotations(); if (syntaxNode == null) return; var line = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; - var walker = new CodeWithDocumentationWalker(ClassDepth, line); + var walker = new CodeWithDocumentationWalker(ClassDepth, line, 
_propertyOrMethodName); walker.Visit(syntaxNode); this.Blocks.AddRange(walker.Blocks); } public override void VisitAccessorDeclaration(AccessorDeclarationSyntax node) { + if (node.ShouldBeHidden()) return; + if (!this.InsideFluentOrInitializerExample) return; var syntaxNode = node?.ChildNodes()?.LastOrDefault()?.WithAdditionalAnnotations() as BlockSyntax; if (syntaxNode == null) return; var line = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; - var walker = new CodeWithDocumentationWalker(ClassDepth, line); + var walker = new CodeWithDocumentationWalker(ClassDepth, line, _propertyOrMethodName); walker.VisitBlock(syntaxNode); this.Blocks.AddRange(walker.Blocks); } public override void VisitMethodDeclaration(MethodDeclarationSyntax node) { + if (node.ShouldBeHidden()) return; + if (this.ClassDepth == 1) this.InsideAutoIncludeMethodBlock = true; + _propertyOrMethodName = node.Identifier.Text; base.VisitMethodDeclaration(node); this.InsideAutoIncludeMethodBlock = false; + this.IncludeMethodBlockContainsLambda = false; + this.EndLine = 0; } public override void VisitExpressionStatement(ExpressionStatementSyntax node) { + if (node.ShouldBeHidden()) return; + if (this.InsideAutoIncludeMethodBlock) { var line = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; + + // this lambda has already been included so skip it + if (IncludeMethodBlockContainsLambda && this.EndLine >= line) + { + return; + } + var allchildren = node.DescendantNodesAndTokens(descendIntoTrivia: true); if (allchildren.Any(a => a.Kind() == SyntaxKind.MultiLineDocumentationCommentTrivia)) { - var walker = new CodeWithDocumentationWalker(ClassDepth, line); + var walker = new CodeWithDocumentationWalker(ClassDepth, line, _propertyOrMethodName); walker.Visit(node.WithAdditionalAnnotations()); this.Blocks.AddRange(walker.Blocks); return; } base.VisitExpressionStatement(node); - this.Blocks.Add(new CodeBlock(node.WithoutLeadingTrivia().ToFullString(), line)); + var code = 
node.WithoutLeadingTrivia().ToFullString(); + code = code.RemoveNumberOfLeadingTabsAfterNewline(ClassDepth + 2); + this.Blocks.Add(new CodeBlock(code, line, Language.CSharp, _propertyOrMethodName)); } else base.VisitExpressionStatement(node); - } public override void VisitLocalDeclarationStatement(LocalDeclarationStatementSyntax node) { + if (node.ShouldBeHidden()) return; + if (this.InsideAutoIncludeMethodBlock) { var allchildren = node.DescendantNodesAndTokens(descendIntoTrivia: true); - var line = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; + var linePositionSpan = node.SyntaxTree.GetLineSpan(node.Span); + var line = linePositionSpan.StartLinePosition.Line; if (allchildren.Any(a => a.Kind() == SyntaxKind.MultiLineDocumentationCommentTrivia)) { - var walker = new CodeWithDocumentationWalker(ClassDepth, line); + var walker = new CodeWithDocumentationWalker(ClassDepth, line, _propertyOrMethodName); walker.Visit(node.WithAdditionalAnnotations()); this.Blocks.AddRange(walker.Blocks); return; } - this.Blocks.Add(new CodeBlock(node.WithoutLeadingTrivia().ToFullString(), line)); + var code = node.WithoutLeadingTrivia().ToFullString(); + code = code.RemoveNumberOfLeadingTabsAfterNewline(ClassDepth + 2); + this.Blocks.Add(new CodeBlock(code, line, Language.CSharp, _propertyOrMethodName)); + + if (allchildren.Any(a => a.Kind() == SyntaxKind.SimpleLambdaExpression)) + { + // nested lambda inside this local declaration + this.IncludeMethodBlockContainsLambda = true; + this.EndLine = linePositionSpan.EndLinePosition.Line; + } } base.VisitLocalDeclarationStatement(node); } + public override void VisitForEachStatement(ForEachStatementSyntax node) + { + if (node.ShouldBeHidden()) return; + + if (this.InsideAutoIncludeMethodBlock) + { + var line = node.SyntaxTree.GetLineSpan(node.Span).StartLinePosition.Line; + var walker = new CodeWithDocumentationWalker(ClassDepth, line, _propertyOrMethodName); + walker.Visit(node); + this.Blocks.AddRange(walker.Blocks); + } 
+ else base.VisitForEachStatement(node); + } + public override void VisitTrivia(SyntaxTrivia trivia) { if (trivia.Kind() != SyntaxKind.MultiLineDocumentationCommentTrivia) @@ -131,23 +222,24 @@ public override void VisitTrivia(SyntaxTrivia trivia) } this.InsideMultiLineDocumentation = true; - this.CreateTextBlocksFromTrivia(trivia); - this.InsideMultiLineDocumentation = false; - } - private void CreateTextBlocksFromTrivia(SyntaxTrivia trivia) - { - var tokens = trivia.ToFullString().TrimStart('/', '*').TrimEnd('*', '/').Split('\n'); + var tokens = trivia.ToFullString() + .RemoveLeadingAndTrailingMultiLineComments() + .SplitOnNewLines(StringSplitOptions.None); var builder = new StringBuilder(); + foreach (var token in tokens) { - var decodedToken = System.Net.WebUtility.HtmlDecode(token.Trim().Trim('*').Trim()); + var currentToken = token.RemoveLeadingSpacesAndAsterisk(); + var decodedToken = System.Net.WebUtility.HtmlDecode(currentToken); builder.AppendLine(decodedToken); } var text = builder.ToString(); var line = trivia.SyntaxTree.GetLineSpan(trivia.Span).StartLinePosition.Line; this.Blocks.Add(new TextBlock(text, line)); + + this.InsideMultiLineDocumentation = false; } } } diff --git a/src/CodeGeneration/Nest.Litterateur/paket.references b/src/CodeGeneration/Nest.Litterateur/paket.references index b3b05a0fa7b..366c1bd2d08 100644 --- a/src/CodeGeneration/Nest.Litterateur/paket.references +++ b/src/CodeGeneration/Nest.Litterateur/paket.references @@ -1 +1,3 @@ -Microsoft.CodeAnalysis.CSharp \ No newline at end of file +Microsoft.CodeAnalysis.CSharp +AsciiDocNet +Newtonsoft.Json \ No newline at end of file diff --git a/src/CodeGeneration/Nest.Litterateur/project.json b/src/CodeGeneration/Nest.Litterateur/project.json index 3365fd2e146..b1bccdffd1a 100644 --- a/src/CodeGeneration/Nest.Litterateur/project.json +++ b/src/CodeGeneration/Nest.Litterateur/project.json @@ -5,7 +5,7 @@ "emitEntryPoint": true }, "dependencies": { - + "Newtonsoft.Json": "8.0.2", }, 
"commands": { "Nest.Litterateur": "Nest.Litterateur" @@ -13,55 +13,68 @@ "configurations": { "Debug": { "compilationOptions": { - "define": [ "DEBUG", "TRACE" ] + "define": [ + "DEBUG", + "TRACE" + ] } }, "Release": { "compilationOptions": { - "define": [ "RELEASE", "TRACE" ], + "define": [ + "RELEASE", + "TRACE" + ], "optimize": true } } }, - "frameworks": { - "dnx451": { - "frameworkAssemblies": { - "System.Runtime": "", - "System.Runtime.Serialization": "", - "System.Threading.Tasks": "", - "System.Text.Encoding": "" - }, - "dependencies": { - "Microsoft.CSharp": "4.0.1-beta-23409", - "Microsoft.CodeAnalysis": "1.1.1" - } - }, - "dotnet5.1": { - "compilationOptions": { "define": [ "DOTNETCORE" ] }, - "dependencies": { - "System.Runtime": "4.0.21-beta-23225", - "System.Collections": "4.0.11-beta-23225", - "System.Reflection": "4.1.0-beta-23225", - "System.Collections.Specialized": "4.0.0-beta-23109", - "System.Linq": "4.0.0-beta-23109", - "System.IO.FileSystem": "4.0.0-beta-23109", - "System.IO.Compression": "4.0.0-beta-23109", - "System.Runtime.Serialization.Primitives": "4.0.10-beta-23109", - "System.Text.RegularExpressions": "4.0.10-beta-23109", - "System.Collections.Concurrent": "4.0.10-beta-23109", - "System.Reflection.Extensions": "4.0.0-beta-23109", - "System.Reflection.TypeExtensions": "4.0.0-beta-23109", - "System.Reflection.Metadata": "1.1.0-alpha-00009", - "System.Reflection.Primitives": "4.0.0-beta-23109", - "System.Linq.Expressions": "4.0.10-beta-23109", - "System.Dynamic.Runtime": "4.0.11-beta-23225", - "Microsoft.CSharp": "4.0.1-beta-23409", - "Microsoft.CodeAnalysis": "1.1.1", - "System.Security.Cryptography.Encoding": "4.0.0-beta-23225", - "System.Security.Cryptography.X509Certificates": "4.0.0-beta-23225", - "System.ComponentModel.TypeConverter": "4.0.0-beta-23109", - "System.Net.Http": "4.0.1-beta-23225" - } - } + "frameworks": { + "dnx451": { + "frameworkAssemblies": { + "System.Runtime": "", + "System.Runtime.Serialization": "", + 
"System.Threading.Tasks": "", + "System.Text.Encoding": "", + "System.IO": "" + }, + "dependencies": { + "Microsoft.CSharp": "4.0.1-beta-23409", + "Microsoft.CodeAnalysis": "1.1.1", + "AsciiDocNet": "1.0.0-alpha2" + } + }, + "dotnet5.1": { + "compilationOptions": { + "define": [ + "DOTNETCORE" + ] + }, + "dependencies": { + "System.Runtime": "4.0.21-beta-23225", + "System.Collections": "4.0.11-beta-23225", + "System.Reflection": "4.1.0-beta-23225", + "System.Collections.Specialized": "4.0.0-beta-23109", + "System.Linq": "4.0.0-beta-23109", + "System.IO": "4.0.0-beta-23109", + "System.IO.FileSystem": "4.0.0-beta-23109", + "System.IO.Compression": "4.0.0-beta-23109", + "System.Runtime.Serialization.Primitives": "4.0.10-beta-23109", + "System.Text.RegularExpressions": "4.0.10-beta-23109", + "System.Collections.Concurrent": "4.0.10-beta-23109", + "System.Reflection.Extensions": "4.0.0-beta-23109", + "System.Reflection.TypeExtensions": "4.0.0-beta-23109", + "System.Reflection.Metadata": "1.1.0-alpha-00009", + "System.Reflection.Primitives": "4.0.0-beta-23109", + "System.Linq.Expressions": "4.0.10-beta-23109", + "System.Dynamic.Runtime": "4.0.11-beta-23225", + "Microsoft.CSharp": "4.0.1-beta-23409", + "Microsoft.CodeAnalysis": "1.1.1", + "System.Security.Cryptography.Encoding": "4.0.0-beta-23225", + "System.Security.Cryptography.X509Certificates": "4.0.0-beta-23225", + "System.ComponentModel.TypeConverter": "4.0.0-beta-23109", + "System.Net.Http": "4.0.1-beta-23225" + } } + } } \ No newline at end of file diff --git a/src/Elasticsearch.Net/Responses/ElasticsearchResponse.cs b/src/Elasticsearch.Net/Responses/ElasticsearchResponse.cs index b4a41aa866c..140251cd4b9 100644 --- a/src/Elasticsearch.Net/Responses/ElasticsearchResponse.cs +++ b/src/Elasticsearch.Net/Responses/ElasticsearchResponse.cs @@ -75,7 +75,7 @@ public class ElasticsearchResponse : IApiCallDetails public bool SuccessOrKnownError => this.Success || (HttpStatusCode >= 400 && HttpStatusCode < 599 && 
HttpStatusCode != 503 //service unavailable needs to be retried - && HttpStatusCode != 502 //bad gateway needs to be retried + && HttpStatusCode != 502 //bad gateway needs to be retried ); public Exception OriginalException { get; protected internal set; } @@ -104,6 +104,6 @@ public string DebugInformation } } - public override string ToString() => $"{(Success ? "S" : "Uns")}uccesful low level call on {HttpMethod.GetStringValue()}: {Uri.PathAndQuery}"; + public override string ToString() => $"{(Success ? "S" : "Uns")}uccessful low level call on {HttpMethod.GetStringValue()}: {Uri.PathAndQuery}"; } } diff --git a/src/Elasticsearch.Net/project.json b/src/Elasticsearch.Net/project.json index ea00f9507da..0d262a2d862 100644 --- a/src/Elasticsearch.Net/project.json +++ b/src/Elasticsearch.Net/project.json @@ -25,6 +25,7 @@ }, "copyright": "2014-2016 Elasticsearch BV", "version": "2.0.5", + "releaseNotes": "See https://github.com/elastic/elasticsearch-net/releases", "compilationOptions": { "warningsAsErrors": false }, diff --git a/src/Nest/Aggregations/AggregationContainer.cs b/src/Nest/Aggregations/AggregationContainer.cs index ffc88ad1aa6..fbd32f4f037 100644 --- a/src/Nest/Aggregations/AggregationContainer.cs +++ b/src/Nest/Aggregations/AggregationContainer.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; using System.Linq; using Newtonsoft.Json; -using Nest.Aggregations.Visitor; namespace Nest { diff --git a/src/Nest/Aggregations/Visitor/AggregationVisitor.cs b/src/Nest/Aggregations/Visitor/AggregationVisitor.cs index 85999bebcd5..c5041fb2241 100644 --- a/src/Nest/Aggregations/Visitor/AggregationVisitor.cs +++ b/src/Nest/Aggregations/Visitor/AggregationVisitor.cs @@ -1,249 +1,263 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; - -namespace Nest.Aggregations.Visitor -{ - - public interface IAggregationVisitor - { - /// - /// The current depth of the node being visited - /// - int Depth { 
get; set; } - - /// - /// Hints the relation with the parent, i.e aggregations contained inside a bucket aggregation will have AggregationVisitorScope.Bucket set. - /// - AggregationVisitorScope Scope { get; set; } - - /// - /// Visit the aggregation container just before we dispatch into the aggregation it holds - /// - /// - void Visit(IAggregationContainer aggregationContainer); - - /// - /// Visit every aggregation item just before they are visited by their specialized Visit() implementation - /// - /// The IAggregation object that will be visited - void Visit(IAggregation aggregation); - void Visit(IAverageAggregation aggregation); - void Visit(IValueCountAggregation aggregation); - void Visit(IMaxAggregation aggregation); - void Visit(IMinAggregation aggregation); - void Visit(IStatsAggregator aggregation); - void Visit(ISumAggregation aggregation); - void Visit(IExtendedStatsAggregation aggregation); - void Visit(IDateHistogramAggregation aggregation); - void Visit(IPercentilesAggregation aggregation); - void Visit(IDateRangeAggregation aggregation); - void Visit(IFilterAggregation aggregation); - void Visit(IFiltersAggregation aggregation); - void Visit(IGeoDistanceAggregation aggregation); - void Visit(IGeoHashGridAggregation aggregation); - void Visit(IGeoBoundsAggregation aggregation); - void Visit(IHistogramAggregation aggregation); - void Visit(IGlobalAggregation aggregation); - void Visit(IIpRangeAggregation aggregation); - void Visit(ICardinalityAggregation aggregation); - void Visit(IMissingAggregation aggregation); - void Visit(INestedAggregation aggregation); - void Visit(IReverseNestedAggregation aggregation); - void Visit(IRangeAggregation aggregation); - void Visit(ITermsAggregation aggregation); - void Visit(ISignificantTermsAggregation aggregation); - void Visit(IPercentileRanksAggregation aggregation); - void Visit(ITopHitsAggregation aggregation); - void Visit(IChildrenAggregation aggregation); - void Visit(IScriptedMetricAggregation 
aggregation); - void Visit(IAverageBucketAggregation aggregation); - void Visit(IDerivativeAggregation aggregation); - void Visit(IMaxBucketAggregation aggregation); - void Visit(IMinBucketAggregation aggregation); - void Visit(ISumBucketAggregation aggregation); - void Visit(IMovingAverageAggregation aggregation); - void Visit(ICumulativeSumAggregation aggregation); - void Visit(ISerialDifferencingAggregation aggregation); - void Visit(IBucketScriptAggregation aggregation); - void Visit(IBucketSelectorAggregation aggregation); - void Visit(ISamplerAggregation aggregation); - } - - public class AggregationVisitor : IAggregationVisitor - { - public int Depth { get; set; } - - public AggregationVisitorScope Scope { get; set; } - - public virtual void Visit(IValueCountAggregation aggregation) - { - } - - public virtual void Visit(IMinAggregation aggregation) - { - } - - public virtual void Visit(ISumAggregation aggregation) - { - } - - public virtual void Visit(IDateHistogramAggregation aggregation) - { - } - - public virtual void Visit(IDateRangeAggregation aggregation) - { - } - - public virtual void Visit(IFiltersAggregation aggregation) - { - } - - public virtual void Visit(IGeoHashGridAggregation aggregation) - { - } - - public virtual void Visit(IHistogramAggregation aggregation) - { - } - - public virtual void Visit(IIpRangeAggregation aggregation) - { - } - - public virtual void Visit(IMissingAggregation aggregation) - { - } - - public virtual void Visit(IReverseNestedAggregation aggregation) - { - } - - public virtual void Visit(ITermsAggregation aggregation) - { - } - - public virtual void Visit(IPercentileRanksAggregation aggregation) - { - } - - public virtual void Visit(IChildrenAggregation aggregation) - { - } - - public virtual void Visit(IAverageBucketAggregation aggregation) - { - } - - public virtual void Visit(IMaxBucketAggregation aggregation) - { - } - - public virtual void Visit(ISumBucketAggregation aggregation) - { - } - - public virtual void 
Visit(ICumulativeSumAggregation aggregation) - { - } - - public virtual void Visit(IBucketScriptAggregation aggregation) - { - } - - public virtual void Visit(ISamplerAggregation aggregation) - { - } - - public virtual void Visit(IBucketSelectorAggregation aggregation) - { - } - - public virtual void Visit(ISerialDifferencingAggregation aggregation) - { - } - - public virtual void Visit(IMovingAverageAggregation aggregation) - { - } - - public virtual void Visit(IMinBucketAggregation aggregation) - { - } - - public virtual void Visit(IDerivativeAggregation aggregation) - { - } - - public virtual void Visit(IScriptedMetricAggregation aggregation) - { - } - - public virtual void Visit(ITopHitsAggregation aggregation) - { - } - - public virtual void Visit(ISignificantTermsAggregation aggregation) - { - } - - public virtual void Visit(IRangeAggregation aggregation) - { - } - - public virtual void Visit(INestedAggregation aggregation) - { - } - - public virtual void Visit(ICardinalityAggregation aggregation) - { - } - - public virtual void Visit(IGlobalAggregation aggregation) - { - } - - public virtual void Visit(IGeoBoundsAggregation aggregation) - { - } - - public virtual void Visit(IGeoDistanceAggregation aggregation) - { - } - - public virtual void Visit(IFilterAggregation aggregation) - { - } - - public virtual void Visit(IPercentilesAggregation aggregation) - { - } - - public virtual void Visit(IExtendedStatsAggregation aggregation) - { - } - - public virtual void Visit(IStatsAggregator aggregation) - { - } - - public virtual void Visit(IMaxAggregation aggregation) - { - } - - public virtual void Visit(IAverageAggregation aggregation) - { - } - - public virtual void Visit(IAggregation aggregation) - { - } - - public virtual void Visit(IAggregationContainer aggregationContainer) - { - } - } -} +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace Nest +{ + public interface 
IAggregationVisitor + { + /// + /// The current depth of the node being visited + /// + int Depth { get; set; } + + /// + /// Hints the relation with the parent, i.e aggregations contained inside a bucket aggregation will have AggregationVisitorScope.Bucket set. + /// + AggregationVisitorScope Scope { get; set; } + + /// + /// Visit the aggregation container just before we dispatch into the aggregation it holds + /// + /// + void Visit(IAggregationContainer aggregationContainer); + + /// + /// Visit every aggregation item just before they are visited by their specialized Visit() implementation + /// + /// The IAggregation object that will be visited + void Visit(IAggregation aggregation); + void Visit(IAverageAggregation aggregation); + void Visit(IValueCountAggregation aggregation); + void Visit(IMaxAggregation aggregation); + void Visit(IMinAggregation aggregation); + void Visit(IStatsAggregator aggregation); + void Visit(ISumAggregation aggregation); + void Visit(IExtendedStatsAggregation aggregation); + void Visit(IDateHistogramAggregation aggregation); + void Visit(IPercentilesAggregation aggregation); + void Visit(IDateRangeAggregation aggregation); + void Visit(IFilterAggregation aggregation); + void Visit(IFiltersAggregation aggregation); + void Visit(IGeoDistanceAggregation aggregation); + void Visit(IGeoHashGridAggregation aggregation); + void Visit(IGeoBoundsAggregation aggregation); + void Visit(IHistogramAggregation aggregation); + void Visit(IGlobalAggregation aggregation); + void Visit(IIpRangeAggregation aggregation); + void Visit(ICardinalityAggregation aggregation); + void Visit(IMissingAggregation aggregation); + void Visit(INestedAggregation aggregation); + void Visit(IReverseNestedAggregation aggregation); + void Visit(IRangeAggregation aggregation); + void Visit(ITermsAggregation aggregation); + void Visit(ISignificantTermsAggregation aggregation); + void Visit(IPercentileRanksAggregation aggregation); + void Visit(ITopHitsAggregation 
aggregation); + void Visit(IChildrenAggregation aggregation); + void Visit(IScriptedMetricAggregation aggregation); + void Visit(IAverageBucketAggregation aggregation); + void Visit(IDerivativeAggregation aggregation); + void Visit(IMaxBucketAggregation aggregation); + void Visit(IMinBucketAggregation aggregation); + void Visit(ISumBucketAggregation aggregation); + void Visit(IStatsBucketAggregation aggregation); + void Visit(IExtendedStatsBucketAggregation aggregation); + void Visit(IPercentilesBucketAggregation aggregation); + void Visit(IMovingAverageAggregation aggregation); + void Visit(ICumulativeSumAggregation aggregation); + void Visit(ISerialDifferencingAggregation aggregation); + void Visit(IBucketScriptAggregation aggregation); + void Visit(IBucketSelectorAggregation aggregation); + void Visit(ISamplerAggregation aggregation); + } + + public class AggregationVisitor : IAggregationVisitor + { + public int Depth { get; set; } + + public AggregationVisitorScope Scope { get; set; } + + public virtual void Visit(IValueCountAggregation aggregation) + { + } + + public virtual void Visit(IMinAggregation aggregation) + { + } + + public virtual void Visit(ISumAggregation aggregation) + { + } + + public virtual void Visit(IDateHistogramAggregation aggregation) + { + } + + public virtual void Visit(IDateRangeAggregation aggregation) + { + } + + public virtual void Visit(IFiltersAggregation aggregation) + { + } + + public virtual void Visit(IGeoHashGridAggregation aggregation) + { + } + + public virtual void Visit(IHistogramAggregation aggregation) + { + } + + public virtual void Visit(IIpRangeAggregation aggregation) + { + } + + public virtual void Visit(IMissingAggregation aggregation) + { + } + + public virtual void Visit(IReverseNestedAggregation aggregation) + { + } + + public virtual void Visit(ITermsAggregation aggregation) + { + } + + public virtual void Visit(IPercentileRanksAggregation aggregation) + { + } + + public virtual void Visit(IChildrenAggregation 
aggregation) + { + } + + public virtual void Visit(IAverageBucketAggregation aggregation) + { + } + + public virtual void Visit(IMaxBucketAggregation aggregation) + { + } + + public virtual void Visit(ISumBucketAggregation aggregation) + { + } + + public virtual void Visit(IStatsBucketAggregation aggregation) + { + } + + public virtual void Visit(IExtendedStatsBucketAggregation aggregation) + { + } + + public virtual void Visit(IPercentilesBucketAggregation aggregation) + { + } + + public virtual void Visit(ICumulativeSumAggregation aggregation) + { + } + + public virtual void Visit(IBucketScriptAggregation aggregation) + { + } + + public virtual void Visit(ISamplerAggregation aggregation) + { + } + + public virtual void Visit(IBucketSelectorAggregation aggregation) + { + } + + public virtual void Visit(ISerialDifferencingAggregation aggregation) + { + } + + public virtual void Visit(IMovingAverageAggregation aggregation) + { + } + + public virtual void Visit(IMinBucketAggregation aggregation) + { + } + + public virtual void Visit(IDerivativeAggregation aggregation) + { + } + + public virtual void Visit(IScriptedMetricAggregation aggregation) + { + } + + public virtual void Visit(ITopHitsAggregation aggregation) + { + } + + public virtual void Visit(ISignificantTermsAggregation aggregation) + { + } + + public virtual void Visit(IRangeAggregation aggregation) + { + } + + public virtual void Visit(INestedAggregation aggregation) + { + } + + public virtual void Visit(ICardinalityAggregation aggregation) + { + } + + public virtual void Visit(IGlobalAggregation aggregation) + { + } + + public virtual void Visit(IGeoBoundsAggregation aggregation) + { + } + + public virtual void Visit(IGeoDistanceAggregation aggregation) + { + } + + public virtual void Visit(IFilterAggregation aggregation) + { + } + + public virtual void Visit(IPercentilesAggregation aggregation) + { + } + + public virtual void Visit(IExtendedStatsAggregation aggregation) + { + } + + public virtual void 
Visit(IStatsAggregator aggregation) + { + } + + public virtual void Visit(IMaxAggregation aggregation) + { + } + + public virtual void Visit(IAverageAggregation aggregation) + { + } + + public virtual void Visit(IAggregation aggregation) + { + } + + public virtual void Visit(IAggregationContainer aggregationContainer) + { + } + } +} diff --git a/src/Nest/Aggregations/Visitor/AggregationWalker.cs b/src/Nest/Aggregations/Visitor/AggregationWalker.cs index b1d12e5c6a4..b229aa16e24 100644 --- a/src/Nest/Aggregations/Visitor/AggregationWalker.cs +++ b/src/Nest/Aggregations/Visitor/AggregationWalker.cs @@ -1,81 +1,84 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; - -namespace Nest.Aggregations.Visitor -{ - public class AggregationWalker - { - private void Accept(IAggregationVisitor visitor, AggregationDictionary aggregations) - { - if (!aggregations.HasAny()) return; - foreach (var f in aggregations) - this.Accept(visitor, f.Value, AggregationVisitorScope.Bucket); - } - - private void Accept(IAggregationVisitor visitor, IAggregationContainer aggregation, AggregationVisitorScope scope = AggregationVisitorScope.Aggregation) - { - if (aggregation == null) return; - visitor.Scope = scope; - aggregation.Accept(visitor); - } - - private static void AcceptAggregation(T aggregation, IAggregationVisitor visitor, Action scoped) - where T : class, IAggregation - { - if (aggregation == null) return; - - visitor.Depth = visitor.Depth + 1; - visitor.Visit(aggregation); - scoped(visitor, aggregation); - visitor.Depth = visitor.Depth - 1; - } - - public void Walk(IAggregationContainer aggregation, IAggregationVisitor visitor) - { - visitor.Visit(aggregation); - AcceptAggregation(aggregation.Average, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.AverageBucket, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.BucketScript, visitor, (v, d) => v.Visit(d)); - 
AcceptAggregation(aggregation.BucketSelector, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Cardinality, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Children, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.CumulativeSum, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.DateHistogram, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.DateRange, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.Derivative, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.ExtendedStats, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Filter, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.Filters, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.GeoBounds, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.GeoDistance, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.GeoHash, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.Global, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.Histogram, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.IpRange, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.Max, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.MaxBucket, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Min, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.MinBucket, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Missing, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - 
AcceptAggregation(aggregation.MovingAverage, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Nested, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.PercentileRanks, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Percentiles, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Range, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.ReverseNested, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.Sampler, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.ScriptedMetric, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.SerialDifferencing, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.SignificantTerms, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.Stats, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Sum, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.SumBucket, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.Terms, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); - AcceptAggregation(aggregation.TopHits, visitor, (v, d) => v.Visit(d)); - AcceptAggregation(aggregation.ValueCount, visitor, (v, d) => v.Visit(d)); - } - } -} +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace Nest +{ + public class AggregationWalker + { + private void Accept(IAggregationVisitor visitor, AggregationDictionary aggregations) + { + if (!aggregations.HasAny()) return; + foreach (var f in aggregations) + this.Accept(visitor, f.Value, AggregationVisitorScope.Bucket); + } + + private void Accept(IAggregationVisitor visitor, IAggregationContainer aggregation, AggregationVisitorScope scope = 
AggregationVisitorScope.Aggregation) + { + if (aggregation == null) return; + visitor.Scope = scope; + aggregation.Accept(visitor); + } + + private static void AcceptAggregation(T aggregation, IAggregationVisitor visitor, Action scoped) + where T : class, IAggregation + { + if (aggregation == null) return; + + visitor.Depth = visitor.Depth + 1; + visitor.Visit(aggregation); + scoped(visitor, aggregation); + visitor.Depth = visitor.Depth - 1; + } + + public void Walk(IAggregationContainer aggregation, IAggregationVisitor visitor) + { + visitor.Visit(aggregation); + AcceptAggregation(aggregation.Average, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.AverageBucket, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.BucketScript, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.BucketSelector, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Cardinality, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Children, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.CumulativeSum, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.DateHistogram, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.DateRange, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.Derivative, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.ExtendedStats, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Filter, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.Filters, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.GeoBounds, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.GeoDistance, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.GeoHash, visitor, (v, d) => { 
v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.Global, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.Histogram, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.IpRange, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.Max, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.MaxBucket, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Min, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.MinBucket, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Missing, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.MovingAverage, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Nested, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.PercentileRanks, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Percentiles, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Range, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.ReverseNested, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.Sampler, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.ScriptedMetric, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.SerialDifferencing, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.SignificantTerms, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.Stats, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Sum, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.SumBucket, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.StatsBucket, 
visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.ExtendedStatsBucket, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.PercentilesBucket, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.Terms, visitor, (v, d) => { v.Visit(d); this.Accept(v, d.Aggregations); }); + AcceptAggregation(aggregation.TopHits, visitor, (v, d) => v.Visit(d)); + AcceptAggregation(aggregation.ValueCount, visitor, (v, d) => v.Visit(d)); + } + } +} diff --git a/src/Nest/Cat/CatNodeAttributes/CatNodeAttributesRequest.cs b/src/Nest/Cat/CatNodeAttributes/CatNodeAttributesRequest.cs index 7bfe3d3573e..ca8329771f9 100644 --- a/src/Nest/Cat/CatNodeAttributes/CatNodeAttributesRequest.cs +++ b/src/Nest/Cat/CatNodeAttributes/CatNodeAttributesRequest.cs @@ -1,5 +1,5 @@ using System; -#pragma warning disable 612 +#pragma warning disable 612, 618 namespace Nest { @@ -10,3 +10,5 @@ public partial class CatNodeAttributesRequest { } [DescriptorFor("CatNodeattrs")] public partial class CatNodeAttributesDescriptor { } } + +#pragma warning restore 612, 618 diff --git a/src/Nest/CommonAbstractions/Infer/Field/Field.cs b/src/Nest/CommonAbstractions/Infer/Field/Field.cs index 1d12cb85616..4235d7061b1 100644 --- a/src/Nest/CommonAbstractions/Infer/Field/Field.cs +++ b/src/Nest/CommonAbstractions/Infer/Field/Field.cs @@ -9,9 +9,12 @@ namespace Nest [ContractJsonConverter(typeof(FieldJsonConverter))] public class Field : IEquatable, IUrlParameter { - public string Name { get; set; } - public Expression Expression { get; set; } - public PropertyInfo Property { get; set; } + public string Name { get; private set; } + + public Expression Expression { get; private set; } + + public PropertyInfo Property { get; private set; } + public double? 
Boost { get; set; } private object ComparisonValue { get; set; } @@ -21,26 +24,42 @@ public Fields And(Expression> field) where T : class => public Fields And(string field) => new Fields(new [] { this, field }); - public static Field Create(string name, double? boost = null) + public Field(string name, double? boost = null) { - if (name.IsNullOrEmpty()) return null; + if (!name.IsNullOrEmpty()) + { + double? b; + Name = ParseFieldName(name, out b); + Boost = b ?? boost; + ComparisonValue = Name; + } + } - double? b; - Field field = ParseFieldName(name, out b); - field.Boost = b ?? boost; - return field; + public Field(Expression expression, double? boost = null) + { + if (expression != null) + { + Expression = expression; + Boost = boost; + ComparisonValue = ComparisonValueFromExpression(expression); + } } - public static Field Create(Expression expression, double? boost = null) + public Field(PropertyInfo property, double? boost = null) { - Field field = expression; - field.Boost = boost; - return field; + if (property != null) + { + Property = property; + Boost = boost; + ComparisonValue = property; + } } private static string ParseFieldName(string name, out double? boost) { boost = null; + if (name == null) return null; + var parts = name.Split(new [] { '^' }, StringSplitOptions.RemoveEmptyEntries); if (parts.Length > 1) { @@ -50,43 +69,34 @@ private static string ParseFieldName(string name, out double? boost) return name; } - public static implicit operator Field(string name) - { - if (name.IsNullOrEmpty()) return null; - - double? 
boost; - name = ParseFieldName(name, out boost); - return new Field - { - Name = name, - ComparisonValue = name, - Boost = boost - - }; - } - - public static implicit operator Field(Expression expression) + private static object ComparisonValueFromExpression(Expression expression) { if (expression == null) return null; var lambda = expression as LambdaExpression; if (lambda == null) - return new Field { Expression = expression, ComparisonValue = expression.ToString() }; + return expression.ToString(); var memberExpression = lambda.Body as MemberExpression; if (memberExpression == null) - return new Field { Expression = expression, ComparisonValue = expression.ToString() }; - - return new Field { Expression = expression, ComparisonValue = memberExpression}; + return expression.ToString(); + + return memberExpression; + } + + public static implicit operator Field(string name) + { + return name.IsNullOrEmpty() ? null : new Field(name); + } + + public static implicit operator Field(Expression expression) + { + return expression == null ? null : new Field(expression); } public static implicit operator Field(PropertyInfo property) { - return property == null ? null : new Field - { - Property = property, - ComparisonValue = property - }; + return property == null ? null : new Field(property); } public override int GetHashCode() => ComparisonValue?.GetHashCode() ?? 0; diff --git a/src/Nest/CommonAbstractions/Infer/Fields/Fields.cs b/src/Nest/CommonAbstractions/Infer/Fields/Fields.cs index 683a1ea9bc5..096cca280a5 100644 --- a/src/Nest/CommonAbstractions/Infer/Fields/Fields.cs +++ b/src/Nest/CommonAbstractions/Infer/Fields/Fields.cs @@ -26,12 +26,12 @@ string IUrlParameter.GetString(IConnectionConfigurationValues settings) => public Fields And(Expression> field, double? boost = null) where T : class { - this.ListOfFields.Add(Field.Create(field, boost)); + this.ListOfFields.Add(new Field(field, boost)); return this; } public Fields And(string field, double? 
boost = null) { - this.ListOfFields.Add(Field.Create(field, boost)); + this.ListOfFields.Add(new Field(field, boost)); return this; } diff --git a/src/Nest/CommonAbstractions/Static/Static.cs b/src/Nest/CommonAbstractions/Static/Static.cs index f69e9312b40..c55cbe54d99 100644 --- a/src/Nest/CommonAbstractions/Static/Static.cs +++ b/src/Nest/CommonAbstractions/Static/Static.cs @@ -39,10 +39,9 @@ public static Fields Fields(params string[] fields) where T : class => /// The type of the object /// The path we want to specify /// An optional ^boost postfix, only make sense with certain queries - public static Field Field(Expression> path, double? boost = null) - where T : class => - Nest.Field.Create(path, boost); + public static Field Field(Expression> path, double? boost = null) + where T : class => new Nest.Field(path, boost); - public static Field Field(string field, double? boost = null) => Nest.Field.Create(field, boost); + public static Field Field(string field, double? boost = null) => new Nest.Field(field, boost); } } diff --git a/src/Nest/project.json b/src/Nest/project.json index 92bbbac21af..c487e087721 100644 --- a/src/Nest/project.json +++ b/src/Nest/project.json @@ -24,6 +24,7 @@ "url": "https://github.com/elastic/elasticsearch-net" }, "copyright": "2014-2016 Elasticsearch BV", + "releaseNotes": "See https://github.com/elastic/elasticsearch-net/releases", "version": "2.0.5", "compilationOptions": { "warningsAsErrors": false diff --git a/src/Profiling/Profiling.csproj b/src/Profiling/Profiling.csproj index 8fde3503d45..7f84c9bb9af 100644 --- a/src/Profiling/Profiling.csproj +++ b/src/Profiling/Profiling.csproj @@ -32,24 +32,6 @@ prompt 4 - - - - <__paket__xunit_core_props>win81\xunit.core - - - - - <__paket__xunit_core_props>wpa81\xunit.core - - - - - <__paket__xunit_core_props>portable-net45+win8+wp8+wpa81\xunit.core - - - - @@ -198,6 +180,24 @@ + + + + <__paket__xunit_core_props>win81\xunit.core + + + + + <__paket__xunit_core_props>wpa81\xunit.core + + 
+ + + <__paket__xunit_core_props>portable-net45+win8+wp8+wpa81\xunit.core + + + +