diff --git a/.circleci/config.yml b/.circleci/config.yml index 0e28431e..6b6de30d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,19 +1,17 @@ version: 2.1 orbs: - win: circleci/windows@4.1.1 + rubocop: hanachin/rubocop@0.0.6 + win: circleci/windows@5.0 workflows: version: 2 test: jobs: + - rubocop/rubocop: + after-install-rubocop: + - run: gem install rubocop-performance - build-test-windows - - build-test-linux: - name: Ruby 2.5 - docker-image: cimg/ruby:2.5 - - build-test-linux: - name: Ruby 2.6 - docker-image: cimg/ruby:2.6 - build-test-linux: name: Ruby 2.7 docker-image: cimg/ruby:2.7 @@ -21,14 +19,16 @@ workflows: name: Ruby 3.0 docker-image: cimg/ruby:3.0 - build-test-linux: - name: JRuby 9.3 - docker-image: jruby:9.3-jdk + name: Ruby 3.1 + docker-image: cimg/ruby:3.1 + - build-test-linux: + name: JRuby 9.4 + docker-image: jruby:9.4-jdk jruby: true jobs: build-test-windows: - executor: - name: win/default + executor: win/default steps: - checkout @@ -48,15 +48,14 @@ jobs: - run: name: "Setup Consul" command: | - iwr -outf consul.zip https://releases.hashicorp.com/consul/1.4.2/consul_1.4.2_windows_amd64.zip + iwr -outf consul.zip https://releases.hashicorp.com/consul/1.14.3/consul_1.14.3_windows_amd64.zip mkdir consul Expand-Archive -Path consul.zip -DestinationPath consul - sc.exe create "Consul" binPath="C:/Users/circleci/project/consul/consul.exe agent -dev" - run: name: "Run Consul" background: true working_directory: consul - command: sc.exe start "Consul" + command: .\consul.exe agent -dev -client 0.0.0.0 - run: name: "Setup Redis" @@ -70,12 +69,11 @@ jobs: name: "Run Redis" background: true working_directory: redis - command: | - ./redis-server --service-start + command: ./redis-server --service-start - run: ruby -v - - run: choco install msys2 --allow-downgrade -y --version 20200903.0.0 - - run: ridk.cmd exec pacman -S --noconfirm --needed base-devel mingw-w64-x86_64-toolchain + - run: choco install msys2 -y + - run: ridk.cmd install 3 # Install MINGW dev toolchain - run: gem install bundler -v 2.2.33 - run: bundle _2.2.33_ install diff --git a/.gitignore b/.gitignore index d7b37d2f..d1ed1a09 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ mkmf.log .DS_Store Gemfile.lock .ruby-version +contract-tests/contract-tests.iml diff --git a/.hound.yml b/.hound.yml deleted file mode 100644 index 2606b3b5..00000000 --- a/.hound.yml +++ /dev/null @@ -1,2 +0,0 @@ -ruby: - config_file: .rubocop diff --git a/.rubocop.yml b/.rubocop.yml index 85b05f8b..fe9b24ab 100644 --- a/.rubocop.yml +++ b/.rubocop.yml @@ -1,11 +1,23 @@ -AllCops: - Exclude: - - db/schema.rb +require: + - rubocop-performance -Style/AccessorMethodName: +AllCops: + TargetRubyVersion: 2.7 + Include: + - lib/**/*.rb + - spec/**/*.rb + - contract-tests/**/*.rb + NewCops: disable + +Naming/AccessorMethodName: Description: Check the naming of accessor methods for get_/set_. Enabled: false +Style/AccessModifierDeclarations: + Description: 'Access modifiers should be declared to apply to a group of methods or inline before each method, depending on configuration.' + StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#alias-method' + Enabled: false + Style/Alias: Description: 'Use alias_method instead of alias.' 
StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#alias-method' @@ -21,16 +33,28 @@ Style/AsciiComments: StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#english-comments' Enabled: false -Style/AsciiIdentifiers: +Naming/AsciiIdentifiers: Description: 'Use only ascii symbols in identifiers.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#english-identifiers' Enabled: false +Naming/VariableName: + Description: 'Makes sure that all variables use the configured style, snake_case or camelCase, for their names.' + Enabled: false + Style/Attr: Description: 'Checks for uses of Module#attr.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#attr' Enabled: false +Metrics/AbcSize: + Description: 'Checks that the ABC size of methods is not higher than the configured maximum.' + Enabled: false + +Metrics/BlockLength: + Description: 'Checks if the length of a block exceeds some maximum value.' + Enabled: false + Metrics/BlockNesting: Description: 'Avoid excessive block nesting' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#three-is-the-number-thou-shalt-count' @@ -90,10 +114,6 @@ Metrics/CyclomaticComplexity: of test cases needed to validate a method. Enabled: false -Rails/Delegate: - Description: 'Prefer delegate method for delegations.' - Enabled: false - Style/PreferredHashMethods: Description: 'Checks for use of deprecated Hash methods.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#hash-key' @@ -128,16 +148,20 @@ Style/EvenOdd: StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#predicate-methods' Enabled: false -Style/FileName: +Naming/FileName: Description: 'Use snake_case for source file names.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#snake-case-files' Enabled: false -Style/FlipFlop: +Lint/FlipFlop: Description: 'Checks for flip flops' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#no-flip-flops' Enabled: false +Style/FrozenStringLiteralComment: + Description: 'Helps you transition from mutable string literals to frozen string literals.' + Enabled: false + Style/FormatString: Description: 'Enforce the use of Kernel#sprintf, Kernel#format or String#%.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#sprintf' @@ -186,10 +210,10 @@ Style/LineEndConcatenation: line end. Enabled: false -Metrics/LineLength: +Layout/LineLength: Description: 'Limit lines to 150 characters.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#80-character-limits' - Max: 150 + Max: 180 Metrics/MethodLength: Description: 'Avoid methods longer than 10 lines of code.' @@ -206,12 +230,12 @@ Style/NegatedIf: Favor unless over if for negative conditions (or control flow or). StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#unless-for-negatives' - Enabled: false + Enabled: true Style/NegatedWhile: Description: 'Favor until over while for negative conditions.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#until-for-negatives' - Enabled: false + Enabled: true Style/Next: Description: 'Use `next` to skip iteration instead of a condition at the end.' @@ -242,7 +266,7 @@ Style/OneLineConditional: StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#ternary-operator' Enabled: false -Style/OpMethod: +Naming/BinaryOperatorParameterName: Description: 'When defining binary operators, name the argument other.' 
StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#other-arg' Enabled: false @@ -252,6 +276,9 @@ Metrics/ParameterLists: StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#too-many-params' Enabled: false +Metrics/PerceivedComplexity: + Enabled: false + Style/PercentLiteralDelimiters: Description: 'Use `%`-literal delimiters consistently' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#percent-literal-braces' @@ -262,10 +289,10 @@ Style/PerlBackrefs: StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#no-perl-regexp-last-matchers' Enabled: false -Style/PredicateName: +Naming/PredicateName: Description: 'Check the names of predicate methods.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#bool-methods-qmark' - NamePrefixBlacklist: + ForbiddenPrefixes: - is_ Exclude: - spec/**/* @@ -316,17 +343,20 @@ Style/StringLiterals: Description: 'Checks if uses of quotes match the configured preference.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#consistent-string-literals' EnforcedStyle: double_quotes - Enabled: true + Enabled: false Style/TrailingCommaInArguments: Description: 'Checks for trailing comma in argument lists.' StyleGuide: '#no-trailing-params-comma' Enabled: true -Style/TrailingCommaInLiteral: +Style/TrailingCommaInArrayLiteral: Description: 'Checks for trailing comma in array and hash literals.' - StyleGuide: '#no-trailing-array-commas' - Enabled: true + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInHashLiteral: + Description: 'Checks for trailing comma in array and hash literals.' + EnforcedStyleForMultiline: comma Style/TrivialAccessors: Description: 'Prefer attr_* methods to trivial readers/writers.' @@ -361,11 +391,12 @@ Style/WordArray: Layout/DotPosition: Description: 'Checks the position of the dot in multi-line method calls.' StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#consistent-multi-line-chains' - EnforcedStyle: trailing + EnforcedStyle: leading Layout/ExtraSpacing: Description: 'Do not use unnecessary spacing.' Enabled: true + AllowBeforeTrailingComments: true Layout/MultilineOperationIndentation: Description: >- @@ -379,6 +410,13 @@ Layout/InitialIndentation: Checks the indentation of the first non-blank non-comment line in a file. Enabled: false +Layout/SpaceInsideArrayLiteralBrackets: + Description: "Checks that brackets used for array literals have or don't have surrounding space depending on configuration." + Enabled: false + +Layout/TrailingWhitespace: + Description: "Ensures all trailing whitespace has been removed" + Enabled: true # Lint @@ -404,7 +442,7 @@ Lint/CircularArgumentReference: Description: "Don't refer to the keyword argument in the default value." Enabled: false -Lint/ConditionPosition: +Layout/ConditionPosition: Description: >- Checks for condition placed in a confusing position relative to the keyword. @@ -415,7 +453,7 @@ Lint/DeprecatedClassMethods: Description: 'Check for deprecated class method calls.' Enabled: false -Lint/DuplicatedKey: +Lint/DuplicateHashKey: Description: 'Check for duplicate keys in hash literals.' Enabled: false @@ -431,18 +469,12 @@ Lint/FormatParameterMismatch: Description: 'The number of parameters to format/sprint must match the fields.' Enabled: false -Lint/HandleExceptions: +Lint/SuppressedException: Description: "Don't suppress exception." 
StyleGuide: 'https://github.com/bbatsov/ruby-style-guide#dont-hide-exceptions' Enabled: false -Lint/InvalidCharacterLiteral: - Description: >- - Checks for invalid character literals with a non-escaped - whitespace character. - Enabled: false - -Lint/LiteralInCondition: +Lint/LiteralAsCondition: Description: 'Checks of literals used in conditions.' Enabled: false @@ -483,7 +515,7 @@ Lint/UnderscorePrefixedVariableName: Description: 'Do not use prefix `_` for a variable that is used.' Enabled: false -Lint/UnneededDisable: +Lint/RedundantCopDisableDirective: Description: >- Checks for rubocop:disable comments that can be removed. Note: this cop is not disabled when disabling all cops. @@ -529,7 +561,7 @@ Performance/ReverseEach: Reference: 'https://github.com/JuanitoFatas/fast-ruby#enumerablereverseeach-vs-enumerablereverse_each-code' Enabled: false -Performance/Sample: +Style/Sample: Description: >- Use `sample` instead of `shuffle.first`, `shuffle.last`, and `shuffle[Fixnum]`. @@ -551,50 +583,307 @@ Performance/StringReplacement: Reference: 'https://github.com/JuanitoFatas/fast-ruby#stringgsub-vs-stringtr-code' Enabled: false -# Rails +# Disabled temporarily while we bring code base inline +Layout/ArgumentAlignment: + Enabled: false + +Layout/ArrayAlignment: + Enabled: false -Rails/ActionFilter: - Description: 'Enforces consistent use of action filter methods.' +Layout/BlockEndNewline: Enabled: false -Rails/Date: - Description: >- - Checks the correct usage of date aware methods, - such as Date.today, Date.current etc. +Layout/CaseIndentation: Enabled: false -Rails/FindBy: - Description: 'Prefer find_by over where.first.' +Layout/ClosingHeredocIndentation: Enabled: false -Rails/FindEach: - Description: 'Prefer all.find_each over all.find.' +Layout/ClosingParenthesisIndentation: Enabled: false -Rails/HasAndBelongsToMany: - Description: 'Prefer has_many :through to has_and_belongs_to_many.' +Layout/CommentIndentation: Enabled: false -Rails/Output: - Description: 'Checks for calls to puts, print, etc.' +Layout/ElseAlignment: Enabled: false -Rails/ReadWriteAttribute: - Description: >- - Checks for read_attribute(:attr) and - write_attribute(:attr, val). 
+Layout/EmptyLineAfterGuardClause: + Enabled: false + +Layout/EmptyLineBetweenDefs: + Enabled: false + +Layout/EmptyLines: + Enabled: false + +Layout/EmptyLinesAroundBlockBody: + Enabled: false + +Layout/EmptyLinesAroundMethodBody: + Enabled: false + +Layout/EmptyLinesAroundModuleBody: + Enabled: false + +Layout/EndAlignment: + Enabled: false + +Layout/FirstArgumentIndentation: + Enabled: false + +Layout/FirstHashElementIndentation: + Enabled: false + +Layout/HashAlignment: + Enabled: false + +Layout/HeredocIndentation: + Enabled: false + +Layout/IndentationWidth: + Enabled: false + +Layout/LeadingCommentSpace: + Enabled: false + +Layout/LeadingEmptyLines: + Enabled: false + +Layout/MultilineArrayBraceLayout: + Enabled: false + +Layout/MultilineBlockLayout: + Enabled: false + +Layout/MultilineHashBraceLayout: + Enabled: false + +Layout/MultilineMethodCallBraceLayout: + Enabled: false + +Layout/MultilineMethodCallIndentation: + Enabled: false + +Layout/ParameterAlignment: + Enabled: false + +Layout/SpaceAfterComma: + Enabled: false + +Layout/SpaceAroundBlockParameters: + Enabled: false + +Layout/SpaceAroundEqualsInParameterDefault: + Enabled: false + +Layout/SpaceAroundOperators: + Enabled: false + +Layout/SpaceBeforeBlockBraces: + Enabled: false + +Layout/SpaceBeforeComma: + Enabled: false + +Layout/SpaceInsideBlockBraces: + Enabled: false + +Layout/SpaceInsideHashLiteralBraces: + Enabled: false + +Layout/SpaceInsideReferenceBrackets: + Enabled: false + +Layout/TrailingEmptyLines: + Enabled: false + +Lint/ConstantDefinitionInBlock: + Enabled: false + +Lint/IneffectiveAccessModifier: + Enabled: false + +Lint/MissingCopEnableDirective: + Enabled: false + +Lint/RedundantRequireStatement: + Enabled: false + +Lint/StructNewOverride: + Enabled: false + +Lint/UnusedBlockArgument: + Enabled: false + +Lint/UnusedMethodArgument: + Enabled: false + +Lint/UselessAccessModifier: + Enabled: false + +Lint/UselessAssignment: + Enabled: false + +Lint/UselessMethodDefinition: + Enabled: false + +Naming/BlockParameterName: + Enabled: false + +Naming/HeredocDelimiterNaming: + Enabled: false + +Naming/MethodParameterName: + Enabled: false + +Naming/RescuedExceptionsVariableName: + Enabled: false + +Naming/VariableNumber: + Enabled: false + +Style/AccessorGrouping: + Enabled: false + +Style/AndOr: + Enabled: false + +Style/BlockDelimiters: + Enabled: false + +Style/CaseLikeIf: + Enabled: false + +Style/CombinableLoops: + Enabled: false + +Style/CommentedKeyword: + Enabled: false + +Style/ConditionalAssignment: + Enabled: false + +Style/DefWithParentheses: + Enabled: false + +Style/EmptyElse: + Enabled: false + +Style/EmptyMethod: + Enabled: false + +Style/ExplicitBlockArgument: + Enabled: false + +Style/For: + Enabled: false + +Style/FormatStringToken: + Enabled: false + +Style/GlobalStdStream: + Enabled: false + +Style/HashEachMethods: + Enabled: false + +Style/HashSyntax: + Enabled: false + +Style/InfiniteLoop: + Enabled: false + +Style/InverseMethods: + Enabled: false + +Style/MethodCallWithoutArgsParentheses: + Enabled: false + +Style/MissingRespondToMissing: + Enabled: false + +Style/MultilineIfThen: + Enabled: false + +Style/MultilineTernaryOperator: + Enabled: false + +Style/MultipleComparison: + Enabled: false + +Style/MutableConstant: + Enabled: false + +Style/NumericPredicate: + Enabled: false + +Style/OptionalBooleanParameter: + Enabled: false + +Style/ParallelAssignment: + Enabled: false + +Style/RedundantAssignment: + Enabled: false + +Style/RedundantBegin: + Enabled: false + 
+Style/RedundantCondition: + Enabled: true + +Style/RedundantException: + Enabled: false + +Style/RedundantFileExtensionInRequire: + Enabled: false + +Style/RedundantParentheses: + Enabled: true + +Style/RedundantRegexpEscape: + Enabled: false + +Style/RedundantReturn: + Enabled: true + +Style/RedundantSelf: + Enabled: false + +Style/RescueStandardError: + Enabled: false + +Style/SafeNavigation: + Enabled: false + +Style/Semicolon: + Enabled: true + AllowAsExpressionSeparator: true + +Style/SlicingWithRange: + Enabled: false + +Style/SoleNestedConditional: + Enabled: false + +Style/StringConcatenation: + Enabled: false + +Style/SymbolArray: + Enabled: false + +Style/SymbolProc: + Enabled: false + +Style/TernaryParentheses: Enabled: false -Rails/ScopeArgs: - Description: 'Checks the arguments of ActiveRecord scopes.' +Style/TrailingUnderscoreVariable: Enabled: false -Rails/TimeZone: - Description: 'Checks the correct usage of time zone aware methods.' - StyleGuide: 'https://github.com/bbatsov/rails-style-guide#time' - Reference: 'http://danilenko.org/2012/7/6/rails_timezones' +Style/WhileUntilDo: Enabled: false -Rails/Validation: - Description: 'Use validates :attribute, hash of validations.' +Style/ZeroLengthPredicate: Enabled: false diff --git a/Makefile b/Makefile index 5b264f57..07676969 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,9 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log +# TEST_HARNESS_PARAMS can be set to add -skip parameters for any contract tests that cannot yet pass +# Explanation of current skips: +TEST_HARNESS_PARAMS= + build-contract-tests: @cd contract-tests && bundle _2.2.33_ install @@ -11,8 +15,8 @@ start-contract-test-service-bg: @make start-contract-test-service >$(TEMP_TEST_OUTPUT) 2>&1 & run-contract-tests: - @curl -s https://raw.githubusercontent.com/launchdarkly/sdk-test-harness/v1.0.0/downloader/run.sh \ - | VERSION=v1 PARAMS="-url http://localhost:9000 -debug -stop-service-at-end $(TEST_HARNESS_PARAMS)" sh + @curl -s https://raw.githubusercontent.com/launchdarkly/sdk-test-harness/main/downloader/run.sh \ + | VERSION=v2 PARAMS="-url http://localhost:9000 -debug -stop-service-at-end $(TEST_HARNESS_PARAMS)" sh contract-tests: build-contract-tests start-contract-test-service-bg run-contract-tests diff --git a/contract-tests/Gemfile b/contract-tests/Gemfile index 48b8812f..4e343a12 100644 --- a/contract-tests/Gemfile +++ b/contract-tests/Gemfile @@ -8,3 +8,5 @@ gem 'sinatra', '~> 2.1' gem 'glassfish', :platforms => :jruby gem 'thin', :platforms => :ruby gem 'json' +gem 'rubocop', '~> 1.37', group: 'development' +gem 'rubocop-performance', '~> 1.15', group: 'development' diff --git a/contract-tests/big_segment_store_fixture.rb b/contract-tests/big_segment_store_fixture.rb new file mode 100644 index 00000000..b22552ad --- /dev/null +++ b/contract-tests/big_segment_store_fixture.rb @@ -0,0 +1,24 @@ +require 'http' + +class BigSegmentStoreFixture + def initialize(uri) + @uri = uri + end + + def get_metadata + response = HTTP.post("#{@uri}/getMetadata") + json = response.parse(:json) + LaunchDarkly::Interfaces::BigSegmentStoreMetadata.new(json['lastUpToDate']) + end + + def get_membership(context_hash) + response = HTTP.post("#{@uri}/getMembership", :json => {:contextHash => context_hash}) + json = response.parse(:json) + + json['values'] + end + + def stop + HTTP.delete(@uri) + end +end diff --git a/contract-tests/client_entity.rb b/contract-tests/client_entity.rb index 1f5f0fe2..dc2043d4 100644 --- a/contract-tests/client_entity.rb +++ 
b/contract-tests/client_entity.rb @@ -1,6 +1,8 @@ require 'ld-eventsource' require 'json' require 'net/http' +require 'launchdarkly-server-sdk' +require './big_segment_store_fixture' class ClientEntity def initialize(log, config) @@ -12,13 +14,13 @@ def initialize(log, config) if config[:streaming] streaming = config[:streaming] - opts[:stream_uri] = streaming[:baseUri] if !streaming[:baseUri].nil? - opts[:initial_reconnect_delay] = streaming[:initialRetryDelayMs] / 1_000.0 if !streaming[:initialRetryDelayMs].nil? + opts[:stream_uri] = streaming[:baseUri] unless streaming[:baseUri].nil? + opts[:initial_reconnect_delay] = streaming[:initialRetryDelayMs] / 1_000.0 unless streaming[:initialRetryDelayMs].nil? elsif config[:polling] polling = config[:polling] opts[:stream] = false - opts[:base_uri] = polling[:baseUri] if !polling[:baseUri].nil? - opts[:poll_interval] = polling[:pollIntervalMs] / 1_000.0 if !polling[:pollIntervalMs].nil? + opts[:base_uri] = polling[:baseUri] unless polling[:baseUri].nil? + opts[:poll_interval] = polling[:pollIntervalMs] / 1_000.0 unless polling[:pollIntervalMs].nil? end if config[:events] @@ -26,14 +28,30 @@ def initialize(log, config) opts[:events_uri] = events[:baseUri] if events[:baseUri] opts[:capacity] = events[:capacity] if events[:capacity] opts[:diagnostic_opt_out] = !events[:enableDiagnostics] - opts[:all_attributes_private] = !!events[:allAttributesPrivate] - opts[:private_attribute_names] = events[:globalPrivateAttributes] - opts[:flush_interval] = (events[:flushIntervalMs] / 1_000) if !events[:flushIntervalMs].nil? - opts[:inline_users_in_events] = events[:inlineUsers] || false + opts[:all_attributes_private] = !!events[:allAttributesPrivate] + opts[:private_attributes] = events[:globalPrivateAttributes] + opts[:flush_interval] = (events[:flushIntervalMs] / 1_000) unless events[:flushIntervalMs].nil? else opts[:send_events] = false end + if config[:bigSegments] + big_segments = config[:bigSegments] + + store = BigSegmentStoreFixture.new(config[:bigSegments][:callbackUri]) + context_cache_time = big_segments[:userCacheTimeMs].nil? ? nil : big_segments[:userCacheTimeMs] / 1_000 + status_poll_interval_ms = big_segments[:statusPollIntervalMs].nil? ? nil : big_segments[:statusPollIntervalMs] / 1_000 + stale_after_ms = big_segments[:staleAfterMs].nil? ? 
nil : big_segments[:staleAfterMs] / 1_000 + + opts[:big_segments] = LaunchDarkly::BigSegmentsConfig.new( + store: store, + context_cache_size: big_segments[:userCacheSize], + context_cache_time: context_cache_time, + status_poll_interval: status_poll_interval_ms, + stale_after: stale_after_ms + ) + end + if config[:tags] opts[:application] = { :id => config[:tags][:applicationId], @@ -57,12 +75,12 @@ def evaluate(params) response = {} if params[:detail] - detail = @client.variation_detail(params[:flagKey], params[:user], params[:defaultValue]) + detail = @client.variation_detail(params[:flagKey], params[:context] || params[:user], params[:defaultValue]) response[:value] = detail.value response[:variationIndex] = detail.variation_index response[:reason] = detail.reason else - response[:value] = @client.variation(params[:flagKey], params[:user], params[:defaultValue]) + response[:value] = @client.variation(params[:flagKey], params[:context] || params[:user], params[:defaultValue]) end response @@ -74,25 +92,30 @@ def evaluate_all(params) opts[:with_reasons] = params[:withReasons] || false opts[:details_only_for_tracked_flags] = params[:detailsOnlyForTrackedFlags] || false - @client.all_flags_state(params[:user], opts) + @client.all_flags_state(params[:context] || params[:user], opts) end - def track(params) - @client.track(params[:eventKey], params[:user], params[:data], params[:metricValue]) + def secure_mode_hash(params) + @client.secure_mode_hash(params[:context] || params[:user]) end - def identify(params) - @client.identify(params[:user]) + def track(params) + @client.track(params[:eventKey], params[:context] || params[:user], params[:data], params[:metricValue]) end - def alias(params) - @client.alias(params[:user], params[:previousUser]) + def identify(params) + @client.identify(params[:context] || params[:user]) end def flush_events @client.flush end + def get_big_segment_store_status + status = @client.big_segment_store_status_provider.status + { available: status.available, stale: status.stale } + end + def log @log end diff --git a/contract-tests/service.rb b/contract-tests/service.rb index 68b00288..c542f6a2 100644 --- a/contract-tests/service.rb +++ b/contract-tests/service.rb @@ -4,7 +4,7 @@ require 'net/http' require 'sinatra' -require './client_entity.rb' +require './client_entity' configure :development do disable :show_exceptions @@ -26,11 +26,14 @@ capabilities: [ 'server-side', 'server-side-polling', + 'big-segments', 'all-flags-with-reasons', 'all-flags-client-side-only', 'all-flags-details-only-for-tracked-flags', + 'secure-mode-hash', + 'user-type', 'tags', - ] + ], }.to_json end @@ -83,18 +86,21 @@ when "evaluateAll" response = {:state => client.evaluate_all(params[:evaluateAll])} return [200, nil, response.to_json] + when "secureModeHash" + response = {:result => client.secure_mode_hash(params[:secureModeHash])} + return [200, nil, response.to_json] when "customEvent" client.track(params[:customEvent]) return 201 when "identifyEvent" client.identify(params[:identifyEvent]) return 201 - when "aliasEvent" - client.alias(params[:aliasEvent]) - return 201 when "flushEvents" client.flush_events return 201 + when "getBigSegmentStoreStatus" + status = client.get_big_segment_store_status + return [200, nil, status.to_json] end return [400, nil, {:error => "Unknown command requested"}.to_json] diff --git a/launchdarkly-server-sdk.gemspec b/launchdarkly-server-sdk.gemspec index 04262469..e45f9206 100644 --- a/launchdarkly-server-sdk.gemspec +++ 
b/launchdarkly-server-sdk.gemspec @@ -1,6 +1,6 @@ # coding: utf-8 -lib = File.expand_path("../lib", __FILE__) +lib = File.expand_path("lib", __dir__) $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) require "ldclient-rb/version" require "rake" @@ -19,20 +19,20 @@ Gem::Specification.new do |spec| spec.files = FileList["lib/**/*", "README.md", "LICENSE.txt"] spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } spec.require_paths = ["lib"] - spec.required_ruby_version = ">= 2.5.0" + spec.required_ruby_version = ">= 2.7.0" spec.add_development_dependency "aws-sdk-dynamodb", "~> 1.57" spec.add_development_dependency "bundler", "2.2.33" spec.add_development_dependency "rspec", "~> 3.10" - spec.add_development_dependency "diplomat", "~> 2.4.2" - spec.add_development_dependency "redis", "~> 4.2" - spec.add_development_dependency "connection_pool", "~> 2.2.3" + spec.add_development_dependency "diplomat", "~> 2.6" + spec.add_development_dependency "redis", "~> 5.0" + spec.add_development_dependency "connection_pool", "~> 2.3" spec.add_development_dependency "rspec_junit_formatter", "~> 0.4" spec.add_development_dependency "timecop", "~> 0.9" spec.add_development_dependency "listen", "~> 3.3" # see file_data_source.rb spec.add_development_dependency "webrick", "~> 1.7" - # required by dynamodb - spec.add_development_dependency "oga", "~> 2.2" + spec.add_development_dependency "rubocop", "~> 1.37" + spec.add_development_dependency "rubocop-performance", "~> 1.15" spec.add_runtime_dependency "semantic", "~> 1.6" spec.add_runtime_dependency "concurrent-ruby", "~> 1.1" diff --git a/lib/ldclient-rb.rb b/lib/ldclient-rb.rb index 2bff8c8f..80d5adec 100644 --- a/lib/ldclient-rb.rb +++ b/lib/ldclient-rb.rb @@ -15,13 +15,12 @@ module LaunchDarkly require "ldclient-rb/memoized_value" require "ldclient-rb/in_memory_store" require "ldclient-rb/config" -require "ldclient-rb/newrelic" +require "ldclient-rb/context" +require "ldclient-rb/reference" require "ldclient-rb/stream" require "ldclient-rb/polling" -require "ldclient-rb/user_filter" require "ldclient-rb/simple_lru_cache" require "ldclient-rb/non_blocking_thread_pool" require "ldclient-rb/events" require "ldclient-rb/requestor" -require "ldclient-rb/file_data_source" require "ldclient-rb/integrations" diff --git a/lib/ldclient-rb/config.rb b/lib/ldclient-rb/config.rb index 15e302ea..498d33d6 100644 --- a/lib/ldclient-rb/config.rb +++ b/lib/ldclient-rb/config.rb @@ -13,6 +13,18 @@ class Config # # Constructor for creating custom LaunchDarkly configurations. # + # `user_keys_capacity` and `user_keys_flush_interval` are deprecated + # configuration options. They exist to maintain backwards compatibility + # with previous configurations. Newer code should prefer their replacement + # options -- `context_keys_capacity` and `context_keys_flush_interval`. + # + # In the event both the user and context variations are provided, the + # context specific configuration option will take precedence. + # + # Similarly, `private_attribute_names` is deprecated. Newer code should + # prefer `private_attributes`. If both are provided, `private_attributes` + # will take precedence. + # # @param opts [Hash] the configuration options # @option opts [Logger] :logger See {#logger}. # @option opts [String] :base_uri ("https://sdk.launchdarkly.com") See {#base_uri}. @@ -31,13 +43,13 @@ class Config # @option opts [Boolean] :stream (true) See {#stream?}. # @option opts [Boolean] all_attributes_private (false) See {#all_attributes_private}. 
# @option opts [Array] :private_attribute_names See {#private_attribute_names}. + # @option opts [Array] :private_attributes See {#private_attributes}. # @option opts [Boolean] :send_events (true) See {#send_events}. # @option opts [Integer] :user_keys_capacity (1000) See {#user_keys_capacity}. + # @option opts [Integer] :context_keys_capacity (1000) See {#context_keys_capacity}. # @option opts [Float] :user_keys_flush_interval (300) See {#user_keys_flush_interval}. - # @option opts [Boolean] :inline_users_in_events (false) See {#inline_users_in_events}. + # @option opts [Float] :context_keys_flush_interval (300) See {#context_keys_flush_interval}. # @option opts [Object] :data_source See {#data_source}. - # @option opts [Object] :update_processor Obsolete synonym for `data_source`. - # @option opts [Object] :update_processor_factory Obsolete synonym for `data_source`. # @option opts [Boolean] :diagnostic_opt_out (false) See {#diagnostic_opt_out?}. # @option opts [Float] :diagnostic_recording_interval (900) See {#diagnostic_recording_interval}. # @option opts [String] :wrapper_name See {#wrapper_name}. @@ -63,14 +75,11 @@ def initialize(opts = {}) @offline = opts.has_key?(:offline) ? opts[:offline] : Config.default_offline @poll_interval = opts.has_key?(:poll_interval) && opts[:poll_interval] > Config.default_poll_interval ? opts[:poll_interval] : Config.default_poll_interval @all_attributes_private = opts[:all_attributes_private] || false - @private_attribute_names = opts[:private_attribute_names] || [] + @private_attributes = opts[:private_attributes] || opts[:private_attribute_names] || [] @send_events = opts.has_key?(:send_events) ? opts[:send_events] : Config.default_send_events - @user_keys_capacity = opts[:user_keys_capacity] || Config.default_user_keys_capacity - @user_keys_flush_interval = opts[:user_keys_flush_interval] || Config.default_user_keys_flush_interval - @inline_users_in_events = opts[:inline_users_in_events] || false - @data_source = opts[:data_source] || opts[:update_processor] || opts[:update_processor_factory] - @update_processor = opts[:update_processor] - @update_processor_factory = opts[:update_processor_factory] + @context_keys_capacity = opts[:context_keys_capacity] || opts[:user_keys_capacity] || Config.default_context_keys_capacity + @context_keys_flush_interval = opts[:context_keys_flush_interval] || opts[:user_keys_flush_interval] || Config.default_user_keys_flush_interval + @data_source = opts[:data_source] @diagnostic_opt_out = opts.has_key?(:diagnostic_opt_out) && opts[:diagnostic_opt_out] @diagnostic_recording_interval = opts.has_key?(:diagnostic_recording_interval) && opts[:diagnostic_recording_interval] > Config.minimum_diagnostic_recording_interval ? opts[:diagnostic_recording_interval] : Config.default_diagnostic_recording_interval @@ -126,7 +135,7 @@ def stream? def use_ldd? @use_ldd end - + # # Whether the client should be initialized in offline mode. In offline mode, default values are # returned for all flags and no remote network requests are made. @@ -209,28 +218,37 @@ def offline? attr_reader :feature_store # - # True if all user attributes (other than the key) should be considered private. This means + # True if all context attributes (other than the key) should be considered private. This means # that the attribute values will not be sent to LaunchDarkly in analytics events and will not # appear on the LaunchDarkly dashboard. 
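To make the precedence rules in the constructor comment above concrete, here is a minimal sketch (the values and attribute names are placeholders; the behavior follows the opts fallbacks shown in the constructor diff above):

    require "launchdarkly-server-sdk"

    # Deprecated and current option names can be mixed; when both are supplied,
    # the context_* and private_attributes options take precedence.
    config = LaunchDarkly::Config.new(
      user_keys_capacity: 500,             # deprecated name
      context_keys_capacity: 2000,         # takes precedence
      private_attribute_names: ["email"],  # deprecated name
      private_attributes: ["name"]         # takes precedence
    )

    config.context_keys_capacity  # => 2000
    config.user_keys_capacity     # => 2000 (alias of context_keys_capacity)
    config.private_attributes     # => ["name"]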
# @return [Boolean] - # @see #private_attribute_names + # @see #private_attributes # attr_reader :all_attributes_private # - # A list of user attribute names that should always be considered private. This means that the + # A list of context attribute names that should always be considered private. This means that the # attribute values will not be sent to LaunchDarkly in analytics events and will not appear on # the LaunchDarkly dashboard. # - # You can also specify the same behavior for an individual flag evaluation by storing an array - # of attribute names in the `:privateAttributeNames` property (note camelcase name) of the - # user object. + # You can also specify the same behavior for an individual flag evaluation + # by providing the context object with a list of private attributes. + # + # @see https://docs.launchdarkly.com/sdk/features/user-context-config#using-private-attributes # # @return [Array] # @see #all_attributes_private # - attr_reader :private_attribute_names - + attr_reader :private_attributes + + # + # @deprecated Backwards compatibility alias for #private_attributes. + # + # @return [Integer] + # @see #private_attributes + # + alias :private_attribute_names :private_attributes + # # Whether to send events back to LaunchDarkly. This differs from {#offline?} in that it affects # only the sending of client-side events, not streaming or polling for events from the server. @@ -239,27 +257,35 @@ def offline? attr_reader :send_events # - # The number of user keys that the event processor can remember at any one time. This reduces the - # amount of duplicate user details sent in analytics events. + # The number of context keys that the event processor can remember at any one time. This reduces the + # amount of duplicate context details sent in analytics events. + # @return [Integer] + # @see #context_keys_flush_interval + # + attr_reader :context_keys_capacity + + # + # @deprecated Backwards compatibility alias for #context_keys_capacity. + # # @return [Integer] - # @see #user_keys_flush_interval + # @see #context_keys_flush_interval # - attr_reader :user_keys_capacity + alias :user_keys_capacity :context_keys_capacity # - # The interval in seconds at which the event processor will reset its set of known user keys. + # The interval in seconds at which the event processor will reset its set of known context keys. # @return [Float] - # @see #user_keys_capacity + # @see #context_keys_capacity # - attr_reader :user_keys_flush_interval + attr_reader :context_keys_flush_interval # - # Whether to include full user details in every analytics event. By default, events will only - # include the user key, except for one "index" event that provides the full details for the user. - # The only reason to change this is if you are using the Analytics Data Stream. - # @return [Boolean] + # @deprecated Backwards compatibility alias for #context_keys_flush_interval. # - attr_reader :inline_users_in_events + # @return [Integer] + # @see #context_keys_flush_interval + # + alias :user_keys_flush_interval :context_keys_flush_interval # # An object that is responsible for receiving feature flag data from LaunchDarkly. By default, @@ -279,7 +305,7 @@ def offline? # # Configuration options related to Big Segments. # - # Big Segments are a specific type of user segments. For more information, read the LaunchDarkly + # Big Segments are a specific type of segments. 
For more information, read the LaunchDarkly # documentation: https://docs.launchdarkly.com/home/users/big-segments # # @return [BigSegmentsConfig] @@ -304,12 +330,6 @@ def offline? # attr_reader :application - # @deprecated This is replaced by {#data_source}. - attr_reader :update_processor - - # @deprecated This is replaced by {#data_source}. - attr_reader :update_processor_factory - # # Set to true to opt out of sending diagnostics data. # @@ -446,8 +466,8 @@ def self.default_connect_timeout # def self.default_logger if defined?(Rails) && Rails.respond_to?(:logger) - Rails.logger - else + Rails.logger + else log = ::Logger.new($stdout) log.level = ::Logger::WARN log @@ -503,21 +523,33 @@ def self.default_send_events end # - # The default value for {#user_keys_capacity}. + # The default value for {#context_keys_capacity}. # @return [Integer] 1000 # - def self.default_user_keys_capacity + def self.default_context_keys_capacity 1000 end # - # The default value for {#user_keys_flush_interval}. + # The default value for {#context_keys_flush_interval}. # @return [Float] 300 # - def self.default_user_keys_flush_interval + def self.default_context_keys_flush_interval 300 end + class << self + # + # @deprecated Backwards compatibility alias for #default_context_keys_capacity + # + alias :default_user_keys_capacity :default_context_keys_capacity + + # + # @deprecated Backwards compatibility alias for #default_context_keys_flush_interval + # + alias :default_user_keys_flush_interval :default_context_keys_flush_interval + end + # # The default value for {#diagnostic_recording_interval}. # @return [Float] 900 @@ -538,7 +570,7 @@ def self.minimum_diagnostic_recording_interval # # Configuration options related to Big Segments. # - # Big Segments are a specific type of user segments. For more information, read the LaunchDarkly + # Big Segments are a specific type of segments. For more information, read the LaunchDarkly # documentation: https://docs.launchdarkly.com/home/users/big-segments # # If your application uses Big Segments, you will need to create a `BigSegmentsConfig` that at a @@ -552,8 +584,8 @@ def self.minimum_diagnostic_recording_interval # client = LaunchDarkly::LDClient.new(my_sdk_key, config) # class BigSegmentsConfig - DEFAULT_USER_CACHE_SIZE = 1000 - DEFAULT_USER_CACHE_TIME = 5 + DEFAULT_CONTEXT_CACHE_SIZE = 1000 + DEFAULT_CONTEXT_CACHE_TIME = 5 DEFAULT_STATUS_POLL_INTERVAL = 5 DEFAULT_STALE_AFTER = 2 * 60 @@ -561,15 +593,15 @@ class BigSegmentsConfig # Constructor for setting Big Segments options. # # @param store [LaunchDarkly::Interfaces::BigSegmentStore] the data store implementation - # @param user_cache_size [Integer] See {#user_cache_size}. - # @param user_cache_time [Float] See {#user_cache_time}. + # @param context_cache_size [Integer] See {#context_cache_size}. + # @param context_cache_time [Float] See {#context_cache_time}. # @param status_poll_interval [Float] See {#status_poll_interval}. # @param stale_after [Float] See {#stale_after}. # - def initialize(store:, user_cache_size: nil, user_cache_time: nil, status_poll_interval: nil, stale_after: nil) + def initialize(store:, context_cache_size: nil, context_cache_time: nil, status_poll_interval: nil, stale_after: nil) @store = store - @user_cache_size = user_cache_size.nil? ? DEFAULT_USER_CACHE_SIZE : user_cache_size - @user_cache_time = user_cache_time.nil? ? DEFAULT_USER_CACHE_TIME : user_cache_time + @context_cache_size = context_cache_size.nil? ? 
DEFAULT_CONTEXT_CACHE_SIZE : context_cache_size + @context_cache_time = context_cache_time.nil? ? DEFAULT_CONTEXT_CACHE_TIME : context_cache_time @status_poll_interval = status_poll_interval.nil? ? DEFAULT_STATUS_POLL_INTERVAL : status_poll_interval @stale_after = stale_after.nil? ? DEFAULT_STALE_AFTER : stale_after end @@ -579,14 +611,28 @@ def initialize(store:, user_cache_size: nil, user_cache_time: nil, status_poll_i # @return [LaunchDarkly::Interfaces::BigSegmentStore] attr_reader :store - # The maximum number of users whose Big Segment state will be cached by the SDK at any given time. + # The maximum number of contexts whose Big Segment state will be cached by the SDK at any given time. + # @return [Integer] + attr_reader :context_cache_size + + # + # @deprecated Backwards compatibility alias for #context_cache_size + # # @return [Integer] - attr_reader :user_cache_size + # + alias :user_cache_size :context_cache_size - # The maximum length of time (in seconds) that the Big Segment state for a user will be cached + # The maximum length of time (in seconds) that the Big Segment state for a context will be cached # by the SDK. # @return [Float] - attr_reader :user_cache_time + attr_reader :context_cache_time + + # + # @deprecated Backwards compatibility alias for #context_cache_time + # + # @return [Float] + # + alias :user_cache_time :context_cache_time # The interval (in seconds) at which the SDK will poll the Big Segment store to make sure it is # available and to determine how long ago it was updated. diff --git a/lib/ldclient-rb/context.rb b/lib/ldclient-rb/context.rb new file mode 100644 index 00000000..6941e521 --- /dev/null +++ b/lib/ldclient-rb/context.rb @@ -0,0 +1,487 @@ +require 'set' +require 'ldclient-rb/impl/context' +require 'ldclient-rb/reference' + +module LaunchDarkly + # LDContext is a collection of attributes that can be referenced in flag + # evaluations and analytics events. + # + # To create an LDContext of a single kind, such as a user, you may use + # {LDContext#create} or {LDContext#with_key}. + # + # To create an LDContext with multiple kinds, use {LDContext#create_multi}. + # + # Each factory method will always return an LDContext. However, that + # LDContext may be invalid. 
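As a quick illustration of the factory methods mentioned above (a sketch; the keys and the "organization" kind are placeholders):

    require "launchdarkly-server-sdk"

    # A bare key defaults to the "user" kind.
    user = LaunchDarkly::LDContext.with_key("user-key-123")

    # A single-kind context of another kind, built from a hash.
    org = LaunchDarkly::LDContext.create({kind: "organization", key: "org-key-456", name: "Acme"})

    # A multi-kind context combines two or more single-kind contexts.
    multi = LaunchDarkly::LDContext.create_multi([user, org])

    multi.valid?       # => true
    multi.multi_kind?  # => true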
You can check the validity of the resulting + # context, and the associated errors by calling {LDContext#valid?} and + # {LDContext#error} + class LDContext + KIND_DEFAULT = "user" + KIND_MULTI = "multi" + + ERR_NOT_HASH = 'context data is not a hash' + private_constant :ERR_NOT_HASH + ERR_KEY_EMPTY = 'context key must not be null or empty' + private_constant :ERR_KEY_EMPTY + ERR_KIND_MULTI_NON_CONTEXT_ARRAY = 'context data must be an array of valid LDContexts' + private_constant :ERR_KIND_MULTI_NON_CONTEXT_ARRAY + ERR_KIND_MULTI_CANNOT_CONTAIN_MULTI = 'multi-kind context cannot contain another multi-kind context' + private_constant :ERR_KIND_MULTI_CANNOT_CONTAIN_MULTI + ERR_KIND_MULTI_WITH_NO_KINDS = 'multi-context must contain at least one kind' + private_constant :ERR_KIND_MULTI_WITH_NO_KINDS + ERR_KIND_MULTI_DUPLICATES = 'multi-kind context cannot have same kind more than once' + private_constant :ERR_KIND_MULTI_DUPLICATES + ERR_CUSTOM_NON_HASH = 'context custom must be a hash' + private_constant :ERR_CUSTOM_NON_HASH + ERR_PRIVATE_NON_ARRAY = 'context private attributes must be an array' + + # @return [String, nil] Returns the key for this context + attr_reader :key + + # @return [String, nil] Returns the fully qualified key for this context + attr_reader :fully_qualified_key + + # @return [String, nil] Returns the kind for this context + attr_reader :kind + + # @return [String, nil] Returns the error associated with this LDContext if invalid + attr_reader :error + + # @return [Array] Returns the private attributes associated with this LDContext + attr_reader :private_attributes + + # + # @private + # @param key [String, nil] + # @param fully_qualified_key [String, nil] + # @param kind [String, nil] + # @param name [String, nil] + # @param anonymous [Boolean, nil] + # @param attributes [Hash, nil] + # @param private_attributes [Array, nil] + # @param error [String, nil] + # @param contexts [Array, nil] + # + def initialize(key, fully_qualified_key, kind, name = nil, anonymous = nil, attributes = nil, private_attributes = nil, error = nil, contexts = nil) + @key = key + @fully_qualified_key = fully_qualified_key + @kind = kind + @name = name + @anonymous = anonymous || false + @attributes = attributes + @private_attributes = [] + (private_attributes || []).each do |attribute| + reference = Reference.create(attribute) + @private_attributes << reference if reference.error.nil? + end + @error = error + @contexts = contexts + @is_multi = !contexts.nil? + end + private_class_method :new + + # + # @return [Boolean] Is this LDContext a multi-kind context? + # + def multi_kind? + @is_multi + end + + # + # @return [Boolean] Determine if this LDContext is considered valid + # + def valid? + @error.nil? + end + + # + # Returns a hash mapping each context's kind to its key. + # + # @return [Hash] + # + def keys + return {} unless valid? + return Hash[kind, key] unless multi_kind? + + @contexts.map { |c| [c.kind, c.key] }.to_h + end + + # + # Returns an array of context kinds. + # + # @return [Array] + # + def kinds + return [] unless valid? + return [kind] unless multi_kind? + + @contexts.map { |c| c.kind } + end + + # + # Return an array of top level attribute keys (excluding built-in attributes) + # + # @return [Array] + # + def get_custom_attribute_names + return [] if @attributes.nil? + + @attributes.keys + end + + # + # get_value looks up the value of any attribute of the Context by name. 
+ # This includes only attributes that are addressable in evaluations-- not + # metadata such as private attributes. + # + # For a single-kind context, the attribute name can be any custom attribute. + # It can also be one of the built-in ones like "kind", "key", or "name". + # + # For a multi-kind context, the only supported attribute name is "kind". + # Use {#individual_context} to inspect a Context for a particular kind and + # then get its attributes. + # + # This method does not support complex expressions for getting individual + # values out of JSON objects or arrays, such as "/address/street". Use + # {#get_value_for_reference} for that purpose. + # + # If the value is found, the return value is the attribute value; + # otherwise, it is nil. + # + # @param attribute [String, Symbol] + # @return [any] + # + def get_value(attribute) + reference = Reference.create_literal(attribute) + get_value_for_reference(reference) + end + + # + # get_value_for_reference looks up the value of any attribute of the + # Context, or a value contained within an attribute, based on a {Reference} + # instance. This includes only attributes that are addressable in + # evaluations-- not metadata such as private attributes. + # + # This implements the same behavior that the SDK uses to resolve attribute + # references during a flag evaluation. In a single-kind context, the + # {Reference} can represent a simple attribute name-- either a built-in one + # like "name" or "key", or a custom attribute -- or, it can be a + # slash-delimited path using a JSON-Pointer-like syntax. See {Reference} + # for more details. + # + # For a multi-kind context, the only supported attribute name is "kind". + # Use {#individual_context} to inspect a Context for a particular kind and + # then get its attributes. + # + # If the value is found, the return value is the attribute value; + # otherwise, it is nil. + # + # @param reference [Reference] + # @return [any] + # + def get_value_for_reference(reference) + return nil unless valid? + return nil unless reference.is_a?(Reference) + return nil unless reference.error.nil? + + first_component = reference.component(0) + return nil if first_component.nil? + + if multi_kind? + if reference.depth == 1 && first_component == :kind + return kind + end + + # Multi-kind contexts have no other addressable attributes + return nil + end + + value = get_top_level_addressable_attribute_single_kind(first_component) + return nil if value.nil? + + (1...reference.depth).each do |i| + name = reference.component(i) + + return nil unless value.is_a?(Hash) + return nil unless value.has_key?(name) + + value = value[name] + end + + value + end + + # + # Returns the number of context kinds in this context. + # + # For a valid individual context, this returns 1. For a multi-context, it + # returns the number of context kinds. For an invalid context, it returns + # zero. + # + # @return [Integer] the number of context kinds + # + def individual_context_count + return 0 unless valid? + return 1 if @contexts.nil? + @contexts.count + end + + # + # Returns the single-kind LDContext corresponding to one of the kinds in + # this context. + # + # The `kind` parameter can be either a number representing a zero-based + # index, or a string representing a context kind. + # + # If this method is called on a single-kind LDContext, then the only + # allowable value for `kind` is either zero or the same value as {#kind}, + # and the return value on success is the same LDContext. 
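A short sketch of the two lookup methods described above (attribute names and values are placeholders; the nested lookup uses the slash-delimited Reference syntax defined in reference.rb, which is not part of this diff):

    require "launchdarkly-server-sdk"

    context = LaunchDarkly::LDContext.create({
      kind: "user",
      key: "user-key-123",
      name: "Sandy",
      address: {street: "123 Main St", city: "Springfield"},
    })

    context.get_value("name")     # => "Sandy"
    context.get_value("address")  # => the whole address hash

    # A Reference can address a value nested inside an attribute.
    ref = LaunchDarkly::Reference.create("/address/city")
    context.get_value_for_reference(ref)  # => "Springfield"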
+ # + # If the method is called on a multi-context, and `kind` is a number, it + # must be a non-negative index that is less than the number of kinds (that + # is, less than the return value of {#individual_context_count}), and the + # return value on success is one of the individual LDContexts within. Or, + # if `kind` is a string, it must match the context kind of one of the + # individual contexts. + # + # If there is no context corresponding to `kind`, the method returns nil. + # + # @param kind [Integer, String] the index or string value of a context kind + # @return [LDContext, nil] the context corresponding to that index or kind, + # or null if none. + # + def individual_context(kind) + return nil unless valid? + + if kind.is_a?(Integer) + unless multi_kind? + return kind == 0 ? self : nil + end + + return kind >= 0 && kind < @contexts.count ? @contexts[kind] : nil + end + + return nil unless kind.is_a?(String) + + unless multi_kind? + return self.kind == kind ? self : nil + end + + @contexts.each do |context| + return context if context.kind == kind + end + + nil + end + + # + # Retrieve the value of any top level, addressable attribute. + # + # This method returns the value of the requested attribute, or nil if that + # attribute does not exist. + # + # @param name [Symbol] + # @return [any] + # + private def get_top_level_addressable_attribute_single_kind(name) + case name + when :kind + kind + when :key + key + when :name + @name + when :anonymous + @anonymous + else + @attributes&.fetch(name, nil) + end + end + + # + # Convenience method to create a simple single kind context providing only + # a key and kind type. + # + # @param key [String] + # @param kind [String] + # + def self.with_key(key, kind = KIND_DEFAULT) + create({key: key, kind: kind}) + end + + # + # Create a single kind context from the provided hash. + # + # The provided hash must match the format as outlined in the + # {https://docs.launchdarkly.com/sdk/features/user-config SDK + # documentation}. + # + # @param data [Hash] + # @return [LDContext] + # + def self.create(data) + return create_invalid_context(ERR_NOT_HASH) unless data.is_a?(Hash) + return create_legacy_context(data) unless data.has_key?(:kind) + + kind = data[:kind] + if kind == KIND_MULTI + contexts = [] + data.each do |key, value| + next if key == :kind + contexts << create_single_context(value, key.to_s) + end + + return create_multi(contexts) + end + + create_single_context(data, kind) + end + + # + # Create a multi-kind context from the array of LDContexts provided. + # + # A multi-kind context is comprised of two or more single kind contexts. + # You cannot include a multi-kind context inside another multi-kind + # context. + # + # Additionally, the kind of each single-kind context must be unique. For + # instance, you cannot create a multi-kind context that includes two user + # kind contexts. + # + # If you attempt to create a multi-kind context from one single-kind + # context, this method will return the single-kind context instead of a new + # multi-kind context wrapping that one single-kind. + # + # @param contexts [Array] + # @return [LDContext] + # + def self.create_multi(contexts) + return create_invalid_context(ERR_KIND_MULTI_NON_CONTEXT_ARRAY) unless contexts.is_a?(Array) + return create_invalid_context(ERR_KIND_MULTI_WITH_NO_KINDS) if contexts.empty?
+ + kinds = Set.new + contexts.each do |context| + if !context.is_a?(LDContext) + return create_invalid_context(ERR_KIND_MULTI_NON_CONTEXT_ARRAY) + elsif !context.valid? + return create_invalid_context(ERR_KIND_MULTI_NON_CONTEXT_ARRAY) + elsif context.multi_kind? + return create_invalid_context(ERR_KIND_MULTI_CANNOT_CONTAIN_MULTI) + elsif kinds.include? context.kind + return create_invalid_context(ERR_KIND_MULTI_DUPLICATES) + end + + kinds.add(context.kind) + end + + return contexts[0] if contexts.length == 1 + + full_key = contexts.sort_by(&:kind) + .map { |c| LaunchDarkly::Impl::Context::canonicalize_key_for_kind(c.kind, c.key) } + .join(":") + + new(nil, full_key, "multi", nil, false, nil, nil, nil, contexts) + end + + # + # @param error [String] + # @return [LDContext] + # + private_class_method def self.create_invalid_context(error) + new(nil, nil, nil, nil, false, nil, nil, error) + end + + # + # @param data [Hash] + # @return [LDContext] + # + private_class_method def self.create_legacy_context(data) + key = data[:key] + + # Legacy users are allowed to have "" as a key but they cannot have nil as a key. + return create_invalid_context(ERR_KEY_EMPTY) if key.nil? + + name = data[:name] + name_error = LaunchDarkly::Impl::Context.validate_name(name) + return create_invalid_context(name_error) unless name_error.nil? + + anonymous = data[:anonymous] + anonymous_error = LaunchDarkly::Impl::Context.validate_anonymous(anonymous, true) + return create_invalid_context(anonymous_error) unless anonymous_error.nil? + + custom = data[:custom] + unless custom.nil? || custom.is_a?(Hash) + return create_invalid_context(ERR_CUSTOM_NON_HASH) + end + + # We only need to create an attribute hash if one of these keys exist. + # Everything else is stored in dedicated instance variables. + attributes = custom.clone + data.each do |k, v| + case k + when :ip, :email, :avatar, :firstName, :lastName, :country + attributes ||= {} + attributes[k] = v.clone + else + next + end + end + + private_attributes = data[:privateAttributeNames] + if private_attributes && !private_attributes.is_a?(Array) + return create_invalid_context(ERR_PRIVATE_NON_ARRAY) + end + + new(key.to_s, key.to_s, KIND_DEFAULT, name, anonymous, attributes, private_attributes) + end + + # + # @param data [Hash] + # @param kind [String] + # @return [LaunchDarkly::LDContext] + # + private_class_method def self.create_single_context(data, kind) + unless data.is_a?(Hash) + return create_invalid_context(ERR_NOT_HASH) + end + + kind_error = LaunchDarkly::Impl::Context.validate_kind(kind) + return create_invalid_context(kind_error) unless kind_error.nil? + + key = data[:key] + key_error = LaunchDarkly::Impl::Context.validate_key(key) + return create_invalid_context(key_error) unless key_error.nil? + + name = data[:name] + name_error = LaunchDarkly::Impl::Context.validate_name(name) + return create_invalid_context(name_error) unless name_error.nil? + + anonymous = data.fetch(:anonymous, false) + anonymous_error = LaunchDarkly::Impl::Context.validate_anonymous(anonymous, false) + return create_invalid_context(anonymous_error) unless anonymous_error.nil? + + meta = data.fetch(:_meta, {}) + private_attributes = meta[:privateAttributes] + if private_attributes && !private_attributes.is_a?(Array) + return create_invalid_context(ERR_PRIVATE_NON_ARRAY) + end + + # We only need to create an attribute hash if there are keys set outside + # of the ones we store in dedicated instance variables. 
+ attributes = nil + data.each do |k, v| + case k + when :kind, :key, :name, :anonymous, :_meta + next + else + attributes ||= {} + attributes[k] = v.clone + end + end + + full_key = kind == LDContext::KIND_DEFAULT ? key.to_s : LaunchDarkly::Impl::Context::canonicalize_key_for_kind(kind, key.to_s) + new(key.to_s, full_key, kind, name, anonymous, attributes, private_attributes) + end + end +end diff --git a/lib/ldclient-rb/evaluation_detail.rb b/lib/ldclient-rb/evaluation_detail.rb index 676da7a3..5d57aaff 100644 --- a/lib/ldclient-rb/evaluation_detail.rb +++ b/lib/ldclient-rb/evaluation_detail.rb @@ -12,7 +12,7 @@ class EvaluationDetail # @raise [ArgumentError] if `variation_index` or `reason` is not of the correct type def initialize(value, variation_index, reason) raise ArgumentError.new("variation_index must be a number") if !variation_index.nil? && !(variation_index.is_a? Numeric) - raise ArgumentError.new("reason must be an EvaluationReason") if !(reason.is_a? EvaluationReason) + raise ArgumentError.new("reason must be an EvaluationReason") unless reason.is_a? EvaluationReason @value = value @variation_index = variation_index @reason = reason @@ -70,20 +70,20 @@ def ==(other) class EvaluationReason # Value for {#kind} indicating that the flag was off and therefore returned its configured off value. OFF = :OFF - - # Value for {#kind} indicating that the flag was on but the user did not match any targets or rules. + + # Value for {#kind} indicating that the flag was on but the context did not match any targets or rules. FALLTHROUGH = :FALLTHROUGH - - # Value for {#kind} indicating that the user key was specifically targeted for this flag. + + # Value for {#kind} indicating that the context key was specifically targeted for this flag. TARGET_MATCH = :TARGET_MATCH - - # Value for {#kind} indicating that the user matched one of the flag's rules. + + # Value for {#kind} indicating that the context matched one of the flag's rules. RULE_MATCH = :RULE_MATCH - + # Value for {#kind} indicating that the flag was considered off because it had at least one # prerequisite flag that either was off or did not return the desired variation. PREREQUISITE_FAILED = :PREREQUISITE_FAILED - + # Value for {#kind} indicating that the flag could not be evaluated, e.g. because it does not exist # or due to an unexpected error. In this case the result value will be the application default value # that the caller passed to the client. Check {#error_kind} for more details on the problem. @@ -100,8 +100,8 @@ class EvaluationReason # a rule specified a nonexistent variation. An error message will always be logged in this case. ERROR_MALFORMED_FLAG = :MALFORMED_FLAG - # Value for {#error_kind} indicating that the caller passed `nil` for the user parameter, or the - # user lacked a key. + # Value for {#error_kind} indicating that the caller passed `nil` for the context parameter, or the + # context was invalid. ERROR_USER_NOT_SPECIFIED = :USER_NOT_SPECIFIED # Value for {#error_kind} indicating that an unexpected exception stopped flag evaluation. An error @@ -141,7 +141,7 @@ class EvaluationReason # querying at least one Big Segment. Otherwise it returns `nil`. Possible values are defined by # {BigSegmentsStatus}. # - # Big Segments are a specific kind of user segments. For more information, read the LaunchDarkly + # Big Segments are a specific kind of context segments. 
For more information, read the LaunchDarkly # documentation: https://docs.launchdarkly.com/home/users/big-segments # @return [Symbol] attr_reader :big_segments_status @@ -176,9 +176,9 @@ def self.target_match # @return [EvaluationReason] # @raise [ArgumentError] if `rule_index` is not a number or `rule_id` is not a string def self.rule_match(rule_index, rule_id, in_experiment=false) - raise ArgumentError.new("rule_index must be a number") if !(rule_index.is_a? Numeric) + raise ArgumentError.new("rule_index must be a number") unless rule_index.is_a? Numeric raise ArgumentError.new("rule_id must be a string") if !rule_id.nil? && !(rule_id.is_a? String) # in test data, ID could be nil - + if in_experiment er = new(:RULE_MATCH, rule_index, rule_id, nil, nil, true) else @@ -193,7 +193,7 @@ def self.rule_match(rule_index, rule_id, in_experiment=false) # @return [EvaluationReason] # @raise [ArgumentError] if `prerequisite_key` is nil or not a string def self.prerequisite_failed(prerequisite_key) - raise ArgumentError.new("prerequisite_key must be a string") if !(prerequisite_key.is_a? String) + raise ArgumentError.new("prerequisite_key must be a string") unless prerequisite_key.is_a? String new(:PREREQUISITE_FAILED, nil, nil, prerequisite_key, nil) end @@ -203,7 +203,7 @@ def self.prerequisite_failed(prerequisite_key) # @return [EvaluationReason] # @raise [ArgumentError] if `error_kind` is not a symbol def self.error(error_kind) - raise ArgumentError.new("error_kind must be a symbol") if !(error_kind.is_a? Symbol) + raise ArgumentError.new("error_kind must be a symbol") unless error_kind.is_a? Symbol e = @@error_instances[error_kind] e.nil? ? make_error(error_kind) : e end @@ -279,7 +279,7 @@ def as_json(*) # parameter is unused, but may be passed if we're using the json else { kind: @kind } end - if !@big_segments_status.nil? + unless @big_segments_status.nil? ret[:bigSegmentsStatus] = @big_segments_status end ret @@ -327,9 +327,9 @@ def initialize(kind, rule_index, rule_id, prerequisite_key, error_kind, in_exper @kind = kind.to_sym @rule_index = rule_index @rule_id = rule_id - @rule_id.freeze if !rule_id.nil? + @rule_id.freeze unless rule_id.nil? @prerequisite_key = prerequisite_key - @prerequisite_key.freeze if !prerequisite_key.nil? + @prerequisite_key.freeze unless prerequisite_key.nil? @error_kind = error_kind @in_experiment = in_experiment @big_segments_status = big_segments_status @@ -348,7 +348,7 @@ def initialize(kind, rule_index, rule_id, prerequisite_key, error_kind, in_exper ERROR_FLAG_NOT_FOUND => make_error(ERROR_FLAG_NOT_FOUND), ERROR_MALFORMED_FLAG => make_error(ERROR_MALFORMED_FLAG), ERROR_USER_NOT_SPECIFIED => make_error(ERROR_USER_NOT_SPECIFIED), - ERROR_EXCEPTION => make_error(ERROR_EXCEPTION) + ERROR_EXCEPTION => make_error(ERROR_EXCEPTION), } end diff --git a/lib/ldclient-rb/events.rb b/lib/ldclient-rb/events.rb index f2b3e9f9..1c44ba59 100644 --- a/lib/ldclient-rb/events.rb +++ b/lib/ldclient-rb/events.rb @@ -1,3 +1,4 @@ +require "ldclient-rb/impl/context_filter" require "ldclient-rb/impl/diagnostic_events" require "ldclient-rb/impl/event_sender" require "ldclient-rb/impl/event_summarizer" @@ -20,7 +21,7 @@ # On a separate worker thread, EventDispatcher consumes events from the inbox. These are considered # "input events" because they may or may not actually be sent to LaunchDarkly; most flag evaluation # events are not sent, but are counted and the counters become part of a single summary event. 
-# EventDispatcher updates those counters, creates "index" events for any users that have not been seen +# EventDispatcher updates those counters, creates "index" events for any contexts that have not been seen # recently, and places any events that will be sent to LaunchDarkly into the "outbox" queue. # # When it is time to flush events to LaunchDarkly, the contents of the outbox are handed off to @@ -30,7 +31,7 @@ module LaunchDarkly module EventProcessorMethods def record_eval_event( - user, + context, key, version = nil, variation = nil, @@ -43,20 +44,17 @@ def record_eval_event( ) end - def record_identify_event(user) + def record_identify_event(context) end def record_custom_event( - user, + context, key, data = nil, metric_value = nil ) end - def record_alias_event(user, previous_user) - end - def flush end @@ -65,11 +63,7 @@ def stop end MAX_FLUSH_WORKERS = 5 - USER_ATTRS_TO_STRINGIFY_FOR_EVENTS = [ :key, :secondary, :ip, :country, :email, :firstName, :lastName, - :avatar, :name ] - private_constant :MAX_FLUSH_WORKERS - private_constant :USER_ATTRS_TO_STRINGIFY_FOR_EVENTS # @private class NullEventProcessor @@ -81,7 +75,7 @@ class FlushMessage end # @private - class FlushUsersMessage + class FlushContextsMessage end # @private @@ -93,7 +87,7 @@ class SynchronousMessage def initialize @reply = Concurrent::Semaphore.new(0) end - + def completed @reply.release end @@ -123,10 +117,10 @@ def initialize(sdk_key, config, client = nil, diagnostic_accumulator = nil, test post_to_inbox(FlushMessage.new) end @flush_task.execute - @users_flush_task = Concurrent::TimerTask.new(execution_interval: config.user_keys_flush_interval) do - post_to_inbox(FlushUsersMessage.new) + @contexts_flush_task = Concurrent::TimerTask.new(execution_interval: config.context_keys_flush_interval) do + post_to_inbox(FlushContextsMessage.new) end - @users_flush_task.execute + @contexts_flush_task.execute if !diagnostic_accumulator.nil? interval = test_properties && test_properties.has_key?(:diagnostic_recording_interval) ? test_properties[:diagnostic_recording_interval] : @@ -142,7 +136,7 @@ def initialize(sdk_key, config, client = nil, diagnostic_accumulator = nil, test @inbox_full = Concurrent::AtomicBoolean.new(false) event_sender = (test_properties || {})[:event_sender] || - Impl::EventSender.new(sdk_key, config, client ? 
client : Util.new_http_client(config.events_uri, config)) + Impl::EventSender.new(sdk_key, config, client || Util.new_http_client(config.events_uri, config)) @timestamp_fn = (test_properties || {})[:timestamp_fn] || proc { Impl::Util.current_time_millis } @@ -150,7 +144,7 @@ def initialize(sdk_key, config, client = nil, diagnostic_accumulator = nil, test end def record_eval_event( - user, + context, key, version = nil, variation = nil, @@ -161,26 +155,16 @@ def record_eval_event( debug_until = nil, prereq_of = nil ) - post_to_inbox(LaunchDarkly::Impl::EvalEvent.new(timestamp, user, key, version, variation, value, reason, + post_to_inbox(LaunchDarkly::Impl::EvalEvent.new(timestamp, context, key, version, variation, value, reason, default, track_events, debug_until, prereq_of)) end - def record_identify_event(user) - post_to_inbox(LaunchDarkly::Impl::IdentifyEvent.new(timestamp, user)) + def record_identify_event(context) + post_to_inbox(LaunchDarkly::Impl::IdentifyEvent.new(timestamp, context)) end - def record_custom_event(user, key, data = nil, metric_value = nil) - post_to_inbox(LaunchDarkly::Impl::CustomEvent.new(timestamp, user, key, data, metric_value)) - end - - def record_alias_event(user, previous_user) - post_to_inbox(LaunchDarkly::Impl::AliasEvent.new( - timestamp, - user.nil? ? nil : user[:key], - user_to_context_kind(user), - previous_user.nil? ? nil : previous_user[:key], - user_to_context_kind(previous_user) - )) + def record_custom_event(context, key, data = nil, metric_value = nil) + post_to_inbox(LaunchDarkly::Impl::CustomEvent.new(timestamp, context, key, data, metric_value)) end def flush @@ -192,8 +176,8 @@ def stop # final shutdown, which includes a final flush, is done synchronously if @stopped.make_true @flush_task.shutdown - @users_flush_task.shutdown - @diagnostic_event_task.shutdown if !@diagnostic_event_task.nil? + @contexts_flush_task.shutdown + @diagnostic_event_task.shutdown unless @diagnostic_event_task.nil? # Note that here we are not calling post_to_inbox, because we *do* want to wait if the inbox # is full; an orderly shutdown can't happen unless these messages are received. @inbox << FlushMessage.new @@ -227,10 +211,6 @@ def wait_until_inactive end end end - - private def user_to_context_kind(user) - (user.nil? || !user[:anonymous]) ? 'user' : 'anonymousUser' - end end # @private @@ -241,13 +221,13 @@ def initialize(inbox, sdk_key, config, diagnostic_accumulator, event_sender) @diagnostic_accumulator = config.diagnostic_opt_out? ? 
nil : diagnostic_accumulator @event_sender = event_sender - @user_keys = SimpleLRUCacheSet.new(config.user_keys_capacity) + @context_keys = SimpleLRUCacheSet.new(config.context_keys_capacity) @formatter = EventOutputFormatter.new(config) @disabled = Concurrent::AtomicBoolean.new(false) @last_known_past_time = Concurrent::AtomicReference.new(0) - @deduplicated_users = 0 + @deduplicated_contexts = 0 @events_in_last_batch = 0 - + outbox = EventBuffer.new(config.capacity, config.logger) flush_workers = NonBlockingThreadPool.new(MAX_FLUSH_WORKERS) @@ -272,8 +252,8 @@ def main_loop(inbox, outbox, flush_workers, diagnostic_event_workers) case message when FlushMessage trigger_flush(outbox, flush_workers) - when FlushUsersMessage - @user_keys.clear + when FlushContextsMessage + @context_keys.clear when DiagnosticEventMessage send_and_reset_diagnostics(outbox, diagnostic_event_workers) when TestSyncMessage @@ -295,7 +275,7 @@ def main_loop(inbox, outbox, flush_workers, diagnostic_event_workers) def do_shutdown(flush_workers, diagnostic_event_workers) flush_workers.shutdown flush_workers.wait_for_termination - if !diagnostic_event_workers.nil? + unless diagnostic_event_workers.nil? diagnostic_event_workers.shutdown diagnostic_event_workers.wait_for_termination end @@ -305,7 +285,7 @@ def do_shutdown(flush_workers, diagnostic_event_workers) def synchronize_for_testing(flush_workers, diagnostic_event_workers) # Used only by unit tests. Wait until all active flush workers have finished. flush_workers.wait_all - diagnostic_event_workers.wait_all if !diagnostic_event_workers.nil? + diagnostic_event_workers.wait_all unless diagnostic_event_workers.nil? end def dispatch_event(event, outbox) @@ -327,27 +307,26 @@ def dispatch_event(event, outbox) will_add_full_event = true end - # For each user we haven't seen before, we add an index event - unless this is already - # an identify event for that user. - if !(will_add_full_event && @config.inline_users_in_events) - if !event.user.nil? && !notice_user(event.user) && !event.is_a?(LaunchDarkly::Impl::IdentifyEvent) - outbox.add_event(LaunchDarkly::Impl::IndexEvent.new(event.timestamp, event.user)) - end + # For each context we haven't seen before, we add an index event - unless this is already + # an identify event for that context. + if !event.context.nil? && !notice_context(event.context) && !event.is_a?(LaunchDarkly::Impl::IdentifyEvent) + outbox.add_event(LaunchDarkly::Impl::IndexEvent.new(event.timestamp, event.context)) end outbox.add_event(event) if will_add_full_event - outbox.add_event(debug_event) if !debug_event.nil? + outbox.add_event(debug_event) unless debug_event.nil? end - # Add to the set of users we've noticed, and return true if the user was already known to us. - def notice_user(user) - if user.nil? || !user.has_key?(:key) - true - else - known = @user_keys.add(user[:key].to_s) - @deduplicated_users += 1 if known - known - end + # + # Add to the set of contexts we've noticed, and return true if the context + # was already known to us. + # @param context [LaunchDarkly::LDContext] + # @return [Boolean] + # + def notice_context(context) + known = @context_keys.add(context.fully_qualified_key) + @deduplicated_contexts += 1 if known + known end def should_debug_event(event) @@ -365,7 +344,7 @@ def trigger_flush(outbox, flush_workers) return end - payload = outbox.get_payload + payload = outbox.get_payload if !payload.events.empty? || !payload.summary.counters.empty? count = payload.events.length + (payload.summary.counters.empty? ? 
0 : 1) @events_in_last_batch = count @@ -375,7 +354,7 @@ def trigger_flush(outbox, flush_workers) events_out = @formatter.make_output_events(payload.events, payload.summary) result = @event_sender.send_event_data(events_out.to_json, "#{events_out.length} events", false) @disabled.value = true if result.must_shutdown - if !result.time_from_server.nil? + unless result.time_from_server.nil? @last_known_past_time.value = (result.time_from_server.to_f * 1000).to_i end rescue => e @@ -391,8 +370,8 @@ def trigger_flush(outbox, flush_workers) def send_and_reset_diagnostics(outbox, diagnostic_event_workers) return if @diagnostic_accumulator.nil? dropped_count = outbox.get_and_clear_dropped_count - event = @diagnostic_accumulator.create_periodic_event_and_reset(dropped_count, @deduplicated_users, @events_in_last_batch) - @deduplicated_users = 0 + event = @diagnostic_accumulator.create_periodic_event_and_reset(dropped_count, @deduplicated_contexts, @events_in_last_batch) + @deduplicated_contexts = 0 @events_in_last_batch = 0 send_diagnostic_event(event, diagnostic_event_workers) end @@ -430,7 +409,7 @@ def add_event(event) @capacity_exceeded = false else @dropped_events += 1 - if !@capacity_exceeded + unless @capacity_exceeded @capacity_exceeded = true @logger.warn { "[LDClient] Exceeded event queue capacity. Increase capacity to avoid dropping events." } end @@ -442,7 +421,7 @@ def add_to_summary(event) end def get_payload - return FlushPayload.new(@events, @summarizer.snapshot) + FlushPayload.new(@events, @summarizer.snapshot) end def get_and_clear_dropped_count @@ -462,21 +441,18 @@ class EventOutputFormatter FEATURE_KIND = 'feature' IDENTIFY_KIND = 'identify' CUSTOM_KIND = 'custom' - ALIAS_KIND = 'alias' INDEX_KIND = 'index' DEBUG_KIND = 'debug' SUMMARY_KIND = 'summary' - ANONYMOUS_USER_CONTEXT_KIND = 'anonymousUser' def initialize(config) - @inline_users = config.inline_users_in_events - @user_filter = UserFilter.new(config) + @context_filter = LaunchDarkly::Impl::ContextFilter.new(config.all_attributes_private, config.private_attributes) end # Transforms events into the format used for event sending. def make_output_events(events, summary) events_out = events.map { |e| make_output_event(e) } - if !summary.counters.empty? + unless summary.counters.empty? events_out.push(make_summary_event(summary)) end events_out @@ -484,75 +460,62 @@ def make_output_events(events, summary) private def make_output_event(event) case event - + when LaunchDarkly::Impl::EvalEvent out = { kind: FEATURE_KIND, creationDate: event.timestamp, key: event.key, - value: event.value + value: event.value, } - out[:default] = event.default if !event.default.nil? - out[:variation] = event.variation if !event.variation.nil? - out[:version] = event.version if !event.version.nil? - out[:prereqOf] = event.prereq_of if !event.prereq_of.nil? - set_opt_context_kind(out, event.user) - set_user_or_user_key(out, event.user) - out[:reason] = event.reason if !event.reason.nil? + out[:default] = event.default unless event.default.nil? + out[:variation] = event.variation unless event.variation.nil? + out[:version] = event.version unless event.version.nil? + out[:prereqOf] = event.prereq_of unless event.prereq_of.nil? + out[:contextKeys] = event.context.keys + out[:reason] = event.reason unless event.reason.nil? 
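As a rough illustration of the hash the formatter builds above for an evaluation event (values are invented, and the shape of contextKeys is an assumption based on event.context.keys returning one key per context kind):

    {
      kind: "feature",
      creationDate: 1_672_531_200_000,
      key: "my-flag",
      value: true,
      variation: 1,
      version: 42,
      contextKeys: { "user" => "user-key", "org" => "org-key" },  # assumed: one entry per kind in the evaluation context
    }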
out when LaunchDarkly::Impl::IdentifyEvent { kind: IDENTIFY_KIND, creationDate: event.timestamp, - key: event.user[:key].to_s, - user: process_user(event.user) + key: event.context.fully_qualified_key, + context: @context_filter.filter(event.context), } - + when LaunchDarkly::Impl::CustomEvent out = { kind: CUSTOM_KIND, creationDate: event.timestamp, - key: event.key + key: event.key, } - out[:data] = event.data if !event.data.nil? - set_user_or_user_key(out, event.user) - out[:metricValue] = event.metric_value if !event.metric_value.nil? - set_opt_context_kind(out, event.user) + out[:data] = event.data unless event.data.nil? + out[:contextKeys] = event.context.keys + out[:metricValue] = event.metric_value unless event.metric_value.nil? out - when LaunchDarkly::Impl::AliasEvent - { - kind: ALIAS_KIND, - creationDate: event.timestamp, - key: event.key, - contextKind: event.context_kind, - previousKey: event.previous_key, - previousContextKind: event.previous_context_kind - } - when LaunchDarkly::Impl::IndexEvent { kind: INDEX_KIND, creationDate: event.timestamp, - user: process_user(event.user) + context: @context_filter.filter(event.context), } - + when LaunchDarkly::Impl::DebugEvent original = event.eval_event out = { kind: DEBUG_KIND, creationDate: original.timestamp, key: original.key, - user: process_user(original.user), - value: original.value + context: @context_filter.filter(original.context), + value: original.value, } - out[:default] = original.default if !original.default.nil? - out[:variation] = original.variation if !original.variation.nil? - out[:version] = original.version if !original.version.nil? - out[:prereqOf] = original.prereq_of if !original.prereq_of.nil? - set_opt_context_kind(out, original.user) - out[:reason] = original.reason if !original.reason.nil? + out[:default] = original.default unless original.default.nil? + out[:variation] = original.variation unless original.variation.nil? + out[:version] = original.version unless original.version.nil? + out[:prereqOf] = original.prereq_of unless original.prereq_of.nil? + out[:reason] = original.reason unless original.reason.nil? out else @@ -569,9 +532,9 @@ def make_output_events(events, summary) variations.each do |variation, counter| c = { value: counter.value, - count: counter.count + count: counter.count, } - c[:variation] = variation if !variation.nil? + c[:variation] = variation unless variation.nil? if version.nil? c[:unknown] = true else @@ -580,32 +543,14 @@ def make_output_events(events, summary) counters.push(c) end end - flags[flagKey] = { default: flagInfo.default, counters: counters } + flags[flagKey] = { default: flagInfo.default, counters: counters, contextKinds: flagInfo.context_kinds.to_a } end { kind: SUMMARY_KIND, startDate: summary[:start_date], endDate: summary[:end_date], - features: flags + features: flags, } end - - private def set_opt_context_kind(out, user) - out[:contextKind] = ANONYMOUS_USER_CONTEXT_KIND if !user.nil? && user[:anonymous] - end - - private def set_user_or_user_key(out, user) - if @inline_users - out[:user] = process_user(user) - else - key = user[:key] - out[:userKey] = key.is_a?(String) ? 
key : key.to_s - end - end - - private def process_user(user) - filtered = @user_filter.transform_user_props(user) - Util.stringify_attrs(filtered, USER_ATTRS_TO_STRINGIFY_FOR_EVENTS) - end end end diff --git a/lib/ldclient-rb/file_data_source.rb b/lib/ldclient-rb/file_data_source.rb deleted file mode 100644 index 30440353..00000000 --- a/lib/ldclient-rb/file_data_source.rb +++ /dev/null @@ -1,23 +0,0 @@ -require "ldclient-rb/integrations/file_data" - -module LaunchDarkly - # - # Deprecated entry point for the file data source feature. - # - # The new preferred usage is {LaunchDarkly::Integrations::FileData#data_source}. - # - # @deprecated This is replaced by {LaunchDarkly::Integrations::FileData}. - # - class FileDataSource - # - # Deprecated entry point for the file data source feature. - # - # The new preferred usage is {LaunchDarkly::Integrations::FileData#data_source}. - # - # @deprecated This is replaced by {LaunchDarkly::Integrations::FileData#data_source}. - # - def self.factory(options={}) - LaunchDarkly::Integrations::FileData.data_source(options) - end - end -end diff --git a/lib/ldclient-rb/flags_state.rb b/lib/ldclient-rb/flags_state.rb index 50fcec88..57807859 100644 --- a/lib/ldclient-rb/flags_state.rb +++ b/lib/ldclient-rb/flags_state.rb @@ -2,7 +2,7 @@ module LaunchDarkly # - # A snapshot of the state of all feature flags with regard to a specific user, generated by + # A snapshot of the state of all feature flags with regard to a specific context, generated by # calling the {LDClient#all_flags_state}. Serializing this object to JSON using # `JSON.generate` (or the `to_json` method) will produce the appropriate data structure for # bootstrapping the LaunchDarkly JavaScript client. @@ -34,11 +34,11 @@ def add_flag(flag_state, with_reasons, details_only_if_tracked) meta[:reason] = reason end - if !omit_details + unless omit_details meta[:version] = flag_state[:version] end - meta[:variation] = flag_state[:variation] if !flag_state[:variation].nil? + meta[:variation] = flag_state[:variation] unless flag_state[:variation].nil? meta[:trackEvents] = true if flag_state[:trackEvents] meta[:trackReason] = true if flag_state[:trackReason] meta[:debugEventsUntilDate] = flag_state[:debugEventsUntilDate] if flag_state[:debugEventsUntilDate] @@ -46,7 +46,7 @@ def add_flag(flag_state, with_reasons, details_only_if_tracked) end # Returns true if this object contains a valid snapshot of feature flag state, or false if the - # state could not be computed (for instance, because the client was offline or there was no user). + # state could not be computed (for instance, because the client was offline or there was no context). def valid? @valid end diff --git a/lib/ldclient-rb/impl/big_segments.rb b/lib/ldclient-rb/impl/big_segments.rb index c2d82cd8..77a1b9b1 100644 --- a/lib/ldclient-rb/impl/big_segments.rb +++ b/lib/ldclient-rb/impl/big_segments.rb @@ -22,7 +22,7 @@ def initialize(big_segments_config, logger) @logger = logger @last_status = nil - if !@store.nil? + unless @store.nil? @cache = ExpiringCache.new(big_segments_config.user_cache_size, big_segments_config.user_cache_time) @poll_worker = RepeatingTask.new(big_segments_config.status_poll_interval, 0, -> { poll_store_and_update_status }, logger) @poll_worker.start @@ -32,25 +32,25 @@ def initialize(big_segments_config, logger) attr_reader :status_provider def stop - @poll_worker.stop if !@poll_worker.nil? - @store.stop if !@store.nil? + @poll_worker.stop unless @poll_worker.nil? + @store.stop unless @store.nil? 
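As the FeatureFlagsState comment above notes, the snapshot serializes directly into the bootstrap format for the LaunchDarkly JavaScript client. A short hedged sketch, assuming client and context objects already exist:

    state = client.all_flags_state(context)
    if state.valid?
      bootstrap_json = state.to_json  # data structure suitable for bootstrapping the JS client
    end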
end - def get_user_membership(user_key) - return nil if !@store - membership = @cache[user_key] - if !membership + def get_context_membership(context_key) + return nil unless @store + membership = @cache[context_key] + unless membership begin - membership = @store.get_membership(BigSegmentStoreManager.hash_for_user_key(user_key)) + membership = @store.get_membership(BigSegmentStoreManager.hash_for_context_key(context_key)) membership = EMPTY_MEMBERSHIP if membership.nil? - @cache[user_key] = membership + @cache[context_key] = membership rescue => e LaunchDarkly::Util.log_exception(@logger, "Big Segment store membership query returned error", e) return BigSegmentMembershipResult.new(nil, BigSegmentsStatus::STORE_ERROR) end end - poll_store_and_update_status if !@last_status - if !@last_status.available + poll_store_and_update_status unless @last_status + unless @last_status.available return BigSegmentMembershipResult.new(membership, BigSegmentsStatus::STORE_ERROR) end BigSegmentMembershipResult.new(membership, @last_status.stale ? BigSegmentsStatus::STALE : BigSegmentsStatus::HEALTHY) @@ -62,26 +62,26 @@ def get_status def poll_store_and_update_status new_status = Interfaces::BigSegmentStoreStatus.new(false, false) # default to "unavailable" if we don't get a new status below - if !@store.nil? + unless @store.nil? begin metadata = @store.get_metadata - new_status = Interfaces::BigSegmentStoreStatus.new(true, !metadata || is_stale(metadata.last_up_to_date)) + new_status = Interfaces::BigSegmentStoreStatus.new(true, !metadata || stale?(metadata.last_up_to_date)) rescue => e LaunchDarkly::Util.log_exception(@logger, "Big Segment store status query returned error", e) end end @last_status = new_status @status_provider.update_status(new_status) - + new_status end - def is_stale(timestamp) + def stale?(timestamp) !timestamp || ((Impl::Util.current_time_millis - timestamp) >= @stale_after_millis) end - def self.hash_for_user_key(user_key) - Digest::SHA256.base64digest(user_key) + def self.hash_for_context_key(context_key) + Digest::SHA256.base64digest(context_key) end end diff --git a/lib/ldclient-rb/impl/context.rb b/lib/ldclient-rb/impl/context.rb new file mode 100644 index 00000000..e1b9e7a0 --- /dev/null +++ b/lib/ldclient-rb/impl/context.rb @@ -0,0 +1,96 @@ +require "erb" + +module LaunchDarkly + module Impl + module Context + ERR_KIND_NON_STRING = 'context kind must be a string' + ERR_KIND_CANNOT_BE_KIND = '"kind" is not a valid context kind' + ERR_KIND_CANNOT_BE_MULTI = '"multi" is not a valid context kind' + ERR_KIND_INVALID_CHARS = 'context kind contains disallowed characters' + + ERR_KEY_NON_STRING = 'context key must be a string' + ERR_KEY_EMPTY = 'context key must not be empty' + + ERR_NAME_NON_STRING = 'context name must be a string' + + ERR_ANONYMOUS_NON_BOOLEAN = 'context anonymous must be a boolean' + + # + # We allow consumers of this SDK to provide us with either a Hash or an + # instance of an LDContext. This is convenient for them but not as much + # for us. To make the conversion slightly more convenient for us, we have + # created this method. + # + # @param context [Hash, LDContext] + # @return [LDContext] + # + def self.make_context(context) + return context if context.is_a?(LDContext) + + LDContext.create(context) + end + + # + # Returns an error message if the kind is invalid; nil otherwise. 
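Concretely, the kind validation described here behaves as follows; the error strings are the constants defined at the top of this module:

    LaunchDarkly::Impl::Context.validate_kind("org")       # => nil (valid)
    LaunchDarkly::Impl::Context.validate_kind("kind")      # => '"kind" is not a valid context kind'
    LaunchDarkly::Impl::Context.validate_kind("multi")     # => '"multi" is not a valid context kind'
    LaunchDarkly::Impl::Context.validate_kind("bad kind!") # => 'context kind contains disallowed characters'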
+ # + # @param kind [any] + # @return [String, nil] + # + def self.validate_kind(kind) + return ERR_KIND_NON_STRING unless kind.is_a?(String) + return ERR_KIND_CANNOT_BE_KIND if kind == "kind" + return ERR_KIND_CANNOT_BE_MULTI if kind == "multi" + return ERR_KIND_INVALID_CHARS unless kind.match?(/^[\w.-]+$/) + end + + # + # Returns an error message if the key is invalid; nil otherwise. + # + # @param key [any] + # @return [String, nil] + # + def self.validate_key(key) + return ERR_KEY_NON_STRING unless key.is_a?(String) + return ERR_KEY_EMPTY if key == "" + end + + # + # Returns an error message if the name is invalid; nil otherwise. + # + # @param name [any] + # @return [String, nil] + # + def self.validate_name(name) + return ERR_NAME_NON_STRING unless name.nil? || name.is_a?(String) + end + + # + # Returns an error message if anonymous is invalid; nil otherwise. + # + # @param anonymous [any] + # @param allow_nil [Boolean] + # @return [String, nil] + # + def self.validate_anonymous(anonymous, allow_nil) + return nil if anonymous.nil? && allow_nil + return nil if [true, false].include? anonymous + + ERR_ANONYMOUS_NON_BOOLEAN + end + + # + # @param kind [String] + # @param key [String] + # @return [String] + # + def self.canonicalize_key_for_kind(kind, key) + # When building a FullyQualifiedKey, ':' and '%' are percent-escaped; + # we do not use a full URL-encoding function because implementations of + # this are inconsistent across platforms. + encoded = key.gsub("%", "%25").gsub(":", "%3A") + + "#{kind}:#{encoded}" + end + end + end +end diff --git a/lib/ldclient-rb/impl/context_filter.rb b/lib/ldclient-rb/impl/context_filter.rb new file mode 100644 index 00000000..510fe28c --- /dev/null +++ b/lib/ldclient-rb/impl/context_filter.rb @@ -0,0 +1,145 @@ +module LaunchDarkly + module Impl + class ContextFilter + # + # @param all_attributes_private [Boolean] + # @param private_attributes [Array] + # + def initialize(all_attributes_private, private_attributes) + @all_attributes_private = all_attributes_private + + @private_attributes = [] + private_attributes.each do |attribute| + reference = LaunchDarkly::Reference.create(attribute) + @private_attributes << reference if reference.error.nil? + end + end + + # + # Return a hash representation of the provided context with attribute + # redaction applied. + # + # @param context [LaunchDarkly::LDContext] + # @return [Hash] + # + def filter(context) + return filter_single_context(context, true) unless context.multi_kind? + + filtered = {kind: 'multi'} + (0...context.individual_context_count).each do |i| + c = context.individual_context(i) + next if c.nil? + + filtered[c.kind] = filter_single_context(c, false) + end + + filtered + end + + # + # Apply redaction rules for a single context. + # + # @param context [LaunchDarkly::LDContext] + # @param include_kind [Boolean] + # @return [Hash] + # + private def filter_single_context(context, include_kind) + filtered = {key: context.key} + + filtered[:kind] = context.kind if include_kind + filtered[:anonymous] = true if context.get_value(:anonymous) + + redacted = [] + private_attributes = @private_attributes.concat(context.private_attributes) + + name = context.get_value(:name) + if !name.nil? 
&& !check_whole_attribute_private(:name, private_attributes, redacted) + filtered[:name] = name + end + + context.get_custom_attribute_names.each do |attribute| + unless check_whole_attribute_private(attribute, private_attributes, redacted) + value = context.get_value(attribute) + filtered[attribute] = redact_json_value(nil, attribute, value, private_attributes, redacted) + end + end + + filtered[:_meta] = {redactedAttributes: redacted} unless redacted.empty? + + filtered + end + + # + # Check if an entire attribute should be redacted. + # + # @param attribute [Symbol] + # @param private_attributes [Array] + # @param redacted [Array] + # @return [Boolean] + # + private def check_whole_attribute_private(attribute, private_attributes, redacted) + if @all_attributes_private + redacted << attribute + return true + end + + private_attributes.each do |private_attribute| + if private_attribute.component(0) == attribute && private_attribute.depth == 1 + redacted << attribute + return true + end + end + + false + end + + # + # Apply redaction rules to the provided value. + # + # @param parent_path [Array, nil] + # @param name [String] + # @param value [any] + # @param private_attributes [Array] + # @param redacted [Array] + # @return [any] + # + private def redact_json_value(parent_path, name, value, private_attributes, redacted) + return value unless value.is_a?(Hash) + + ret = {} + current_path = parent_path.clone || [] + current_path << name + + value.each do |k, v| + was_redacted = false + private_attributes.each do |private_attribute| + next unless private_attribute.depth == (current_path.count + 1) + + component = private_attribute.component(current_path.count) + next unless component == k + + match = true + (0...current_path.count).each do |i| + unless private_attribute.component(i) == current_path[i] + match = false + break + end + end + + if match + redacted << private_attribute.raw_path.to_sym + was_redacted = true + break + end + end + + unless was_redacted + ret[k] = redact_json_value(current_path, k, v, private_attributes, redacted) + end + end + + ret + end + end + end +end \ No newline at end of file diff --git a/lib/ldclient-rb/impl/diagnostic_events.rb b/lib/ldclient-rb/impl/diagnostic_events.rb index 13a55756..7bc26047 100644 --- a/lib/ldclient-rb/impl/diagnostic_events.rb +++ b/lib/ldclient-rb/impl/diagnostic_events.rb @@ -9,7 +9,7 @@ class DiagnosticAccumulator def self.create_diagnostic_id(sdk_key) { diagnosticId: SecureRandom.uuid, - sdkKeySuffix: sdk_key[-6..-1] || sdk_key + sdkKeySuffix: sdk_key[-6..-1] || sdk_key, } end @@ -25,16 +25,16 @@ def reset(time) end def create_init_event(config) - return { + { kind: 'diagnostic-init', creationDate: Util.current_time_millis, id: @id, configuration: DiagnosticAccumulator.make_config_data(config), sdk: DiagnosticAccumulator.make_sdk_data(config), - platform: DiagnosticAccumulator.make_platform_data + platform: DiagnosticAccumulator.make_platform_data, } end - + def record_stream_init(timestamp, failed, duration_millis) @lock.synchronize do @stream_inits.push({ timestamp: timestamp, failed: failed, durationMillis: duration_millis }) @@ -57,7 +57,7 @@ def create_periodic_event_and_reset(dropped_events, deduplicated_users, events_i droppedEvents: dropped_events, deduplicatedUsers: deduplicated_users, eventsInLastBatch: events_in_last_batch, - streamInits: previous_stream_inits + streamInits: previous_stream_inits, } @data_since_date = current_time event @@ -73,12 +73,11 @@ def self.make_config_data(config) 
diagnosticRecordingIntervalMillis: self.seconds_to_millis(config.diagnostic_recording_interval), eventsCapacity: config.capacity, eventsFlushIntervalMillis: self.seconds_to_millis(config.flush_interval), - inlineUsersInEvents: config.inline_users_in_events, pollingIntervalMillis: self.seconds_to_millis(config.poll_interval), socketTimeoutMillis: self.seconds_to_millis(config.read_timeout), streamingDisabled: !config.stream?, - userKeysCapacity: config.user_keys_capacity, - userKeysFlushIntervalMillis: self.seconds_to_millis(config.user_keys_flush_interval), + userKeysCapacity: config.context_keys_capacity, + userKeysFlushIntervalMillis: self.seconds_to_millis(config.context_keys_flush_interval), usingProxy: ENV.has_key?('http_proxy') || ENV.has_key?('https_proxy') || ENV.has_key?('HTTP_PROXY') || ENV.has_key?('HTTPS_PROXY'), usingRelayDaemon: config.use_ldd?, } @@ -88,7 +87,7 @@ def self.make_config_data(config) def self.make_sdk_data(config) ret = { name: 'ruby-server-sdk', - version: LaunchDarkly::VERSION + version: LaunchDarkly::VERSION, } if config.wrapper_name ret[:wrapperName] = config.wrapper_name @@ -105,7 +104,7 @@ def self.make_platform_data osName: self.normalize_os_name(conf['host_os']), osVersion: 'unknown', # there seems to be no portable way to detect this in Ruby rubyVersion: conf['ruby_version'], - rubyImplementation: Object.constants.include?(:RUBY_ENGINE) ? RUBY_ENGINE : 'unknown' + rubyImplementation: Object.constants.include?(:RUBY_ENGINE) ? RUBY_ENGINE : 'unknown', } end diff --git a/lib/ldclient-rb/impl/evaluator.rb b/lib/ldclient-rb/impl/evaluator.rb index e8c9567d..ec71bd76 100644 --- a/lib/ldclient-rb/impl/evaluator.rb +++ b/lib/ldclient-rb/impl/evaluator.rb @@ -2,6 +2,8 @@ require "ldclient-rb/impl/evaluator_bucketing" require "ldclient-rb/impl/evaluator_helpers" require "ldclient-rb/impl/evaluator_operators" +require "ldclient-rb/impl/model/feature_flag" +require "ldclient-rb/impl/model/segment" module LaunchDarkly module Impl @@ -12,6 +14,81 @@ module Impl :detail # the EvaluationDetail representing the evaluation result ) + class EvaluationException < StandardError + def initialize(msg, error_kind = EvaluationReason::ERROR_MALFORMED_FLAG) + super(msg) + @error_kind = error_kind + end + + # @return [Symbol] + attr_reader :error_kind + end + + class InvalidReferenceException < EvaluationException + end + + class EvaluatorState + # @param original_flag [LaunchDarkly::Impl::Model::FeatureFlag] + def initialize(original_flag) + @prereq_stack = EvaluatorStack.new(original_flag.key) + @segment_stack = EvaluatorStack.new(nil) + end + + attr_reader :prereq_stack + attr_reader :segment_stack + end + + # + # A helper class for managing cycle detection. + # + # Each time a method sees a new flag or segment, they can push that + # object's key onto the stack. Once processing for that object has + # finished, you can call pop to remove it. + # + # Because the most common use case would be a flag or segment without ANY + # prerequisites, this stack has a small optimization in place-- the stack + # is not created until absolutely necessary. + # + class EvaluatorStack + # @param original [String, nil] + def initialize(original) + @original = original + # @type [Array, nil] + @stack = nil + end + + # @param key [String] + def push(key) + # No need to store the key if we already have a record in our instance + # variable. 
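In isolation the cycle-detection stack behaves as follows (a minimal usage sketch of the class defined above):

    stack = LaunchDarkly::Impl::EvaluatorStack.new("flag-a")
    stack.include?("flag-a")  # => true, the original key always counts as present
    stack.push("flag-b")
    stack.include?("flag-b")  # => true
    stack.pop
    stack.include?("flag-b")  # => false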
+ return if @original == key + + # The common use case is that flags/segments won't have prereqs, so we + # don't allocate the stack memory until we absolutely must. + if @stack.nil? + @stack = [] + end + + @stack.push(key) + end + + def pop + return if @stack.nil? || @stack.empty? + @stack.pop + end + + # + # @param key [String] + # @return [Boolean] + # + def include?(key) + return true if key == @original + return false if @stack.nil? + + @stack.include? key + end + end + # Encapsulates the feature flag evaluation logic. The Evaluator has no knowledge of the rest of the SDK environment; # if it needs to retrieve flags or segments that are referenced by a flag, it does so through a simple function that # is provided in the constructor. It also produces feature requests as appropriate for any referenced prerequisite @@ -22,7 +99,7 @@ class Evaluator # @param get_flag [Function] called if the Evaluator needs to query a different flag from the one that it is # currently evaluating (i.e. a prerequisite flag); takes a single parameter, the flag key, and returns the # flag data - or nil if the flag is unknown or deleted - # @param get_segment [Function] similar to `get_flag`, but is used to query a user segment. + # @param get_segment [Function] similar to `get_flag`, but is used to query a context segment. # @param logger [Logger] the client's logger def initialize(get_flag, get_segment, get_big_segments_membership, logger) @get_flag = get_flag @@ -30,15 +107,15 @@ def initialize(get_flag, get_segment, get_big_segments_membership, logger) @get_big_segments_membership = get_big_segments_membership @logger = logger end - + # Used internally to hold an evaluation result and additional state that may be accumulated during an # evaluation. It's simpler and a bit more efficient to represent these as mutable properties rather than # trying to use a pure functional approach, and since we're not exposing this object to any application code # or retaining it anywhere, we don't have to be quite as strict about immutability. # # The big_segments_status and big_segments_membership properties are not used by the caller; they are used - # during an evaluation to cache the result of any Big Segments query that we've done for this user, because - # we don't want to do multiple queries for the same user if multiple Big Segments are referenced in the same + # during an evaluation to cache the result of any Big Segments query that we've done for this context, because + # we don't want to do multiple queries for the same context if multiple Big Segments are referenced in the same # evaluation. EvalResult = Struct.new( :detail, # the EvaluationDetail representing the evaluation result @@ -56,223 +133,394 @@ def self.error_result(errorKind, value = nil) # any events that were generated for prerequisite flags; its `value` will be `nil` if the flag returns the # default value. Error conditions produce a result with a nil value and an error reason, not an exception. # - # @param flag [Object] the flag - # @param user [Object] the user properties - # @return [EvalResult] the evaluation result - def evaluate(flag, user) + # @param flag [LaunchDarkly::Impl::Model::FeatureFlag] the flag + # @param context [LaunchDarkly::LDContext] the evaluation context + # @return [EvalResult] the evaluation result + def evaluate(flag, context) + state = EvaluatorState.new(flag) + result = EvalResult.new - if user.nil? || user[:key].nil? 
- result.detail = Evaluator.error_result(EvaluationReason::ERROR_USER_NOT_SPECIFIED) + begin + detail = eval_internal(flag, context, result, state) + rescue EvaluationException => exn + LaunchDarkly::Util.log_exception(@logger, "Unexpected error when evaluating flag #{flag.key}", exn) + result.detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(exn.error_kind)) + return result + rescue => exn + LaunchDarkly::Util.log_exception(@logger, "Unexpected error when evaluating flag #{flag.key}", exn) + result.detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_EXCEPTION)) return result end - - detail = eval_internal(flag, user, result) - if !result.big_segments_status.nil? + + unless result.big_segments_status.nil? # If big_segments_status is non-nil at the end of the evaluation, it means a query was done at # some point and we will want to include the status in the evaluation reason. detail = EvaluationDetail.new(detail.value, detail.variation_index, detail.reason.with_big_segments_status(result.big_segments_status)) end result.detail = detail - return result + result end + # @param segment [LaunchDarkly::Impl::Model::Segment] def self.make_big_segment_ref(segment) # method is visible for testing # The format of Big Segment references is independent of what store implementation is being # used; the store implementation receives only this string and does not know the details of # the data model. The Relay Proxy will use the same format when writing to the store. - "#{segment[:key]}.g#{segment[:generation]}" + "#{segment.key}.g#{segment.generation}" end - private - - def eval_internal(flag, user, state) - if !flag[:on] - return EvaluatorHelpers.off_result(flag) + # @param flag [LaunchDarkly::Impl::Model::FeatureFlag] the flag + # @param context [LaunchDarkly::LDContext] the evaluation context + # @param eval_result [EvalResult] + # @param state [EvaluatorState] + # @raise [EvaluationException] + private def eval_internal(flag, context, eval_result, state) + unless flag.on + return flag.off_result end - prereq_failure_result = check_prerequisites(flag, user, state) - return prereq_failure_result if !prereq_failure_result.nil? + prereq_failure_result = check_prerequisites(flag, context, eval_result, state) + return prereq_failure_result unless prereq_failure_result.nil? + + # Check context target matches + target_result = check_targets(context, flag) + return target_result unless target_result.nil? - # Check user target matches - (flag[:targets] || []).each do |target| - (target[:values] || []).each do |value| - if value == user[:key] - return EvaluatorHelpers.target_match_result(target, flag) - end - end - end - # Check custom rules - rules = flag[:rules] || [] - rules.each_index do |i| - rule = rules[i] - if rule_match_user(rule, user, state) - reason = rule[:_reason] # try to use cached reason for this rule - reason = EvaluationReason::rule_match(i, rule[:id]) if reason.nil? - return get_value_for_variation_or_rollout(flag, rule, user, reason, - EvaluatorHelpers.rule_precomputed_results(rule)) + flag.rules.each do |rule| + if rule_match_context(rule, context, eval_result, state) + return get_value_for_variation_or_rollout(flag, rule.variation_or_rollout, context, rule.match_results) end end # Check the fallthrough rule - if !flag[:fallthrough].nil? - return get_value_for_variation_or_rollout(flag, flag[:fallthrough], user, EvaluationReason::fallthrough, - EvaluatorHelpers.fallthrough_precomputed_results(flag)) + unless flag.fallthrough.nil? 
+ return get_value_for_variation_or_rollout(flag, flag.fallthrough, context, flag.fallthrough_results) end - return EvaluationDetail.new(nil, nil, EvaluationReason::fallthrough) + EvaluationDetail.new(nil, nil, EvaluationReason::fallthrough) end - def check_prerequisites(flag, user, state) - (flag[:prerequisites] || []).each do |prerequisite| - prereq_ok = true - prereq_key = prerequisite[:key] - prereq_flag = @get_flag.call(prereq_key) + # @param flag [LaunchDarkly::Impl::Model::FeatureFlag] the flag + # @param context [LaunchDarkly::LDContext] the evaluation context + # @param eval_result [EvalResult] + # @param state [EvaluatorState] + # @raise [EvaluationException] if a flag prereq cycle is detected + private def check_prerequisites(flag, context, eval_result, state) + return if flag.prerequisites.empty? - if prereq_flag.nil? - @logger.error { "[LDClient] Could not retrieve prerequisite flag \"#{prereq_key}\" when evaluating \"#{flag[:key]}\"" } - prereq_ok = false - else - begin - prereq_res = eval_internal(prereq_flag, user, state) + state.prereq_stack.push(flag.key) + + begin + flag.prerequisites.each do |prerequisite| + prereq_ok = true + prereq_key = prerequisite.key + + if state.prereq_stack.include?(prereq_key) + raise LaunchDarkly::Impl::EvaluationException.new( + "prerequisite relationship to \"#{prereq_key}\" caused a circular reference; this is probably a temporary condition due to an incomplete update" + ) + end + + prereq_flag = @get_flag.call(prereq_key) + + if prereq_flag.nil? + @logger.error { "[LDClient] Could not retrieve prerequisite flag \"#{prereq_key}\" when evaluating \"#{flag.key}\"" } + prereq_ok = false + else + prereq_res = eval_internal(prereq_flag, context, eval_result, state) # Note that if the prerequisite flag is off, we don't consider it a match no matter what its # off variation was. But we still need to evaluate it in order to generate an event. - if !prereq_flag[:on] || prereq_res.variation_index != prerequisite[:variation] + if !prereq_flag.on || prereq_res.variation_index != prerequisite.variation prereq_ok = false end prereq_eval = PrerequisiteEvalRecord.new(prereq_flag, flag, prereq_res) - state.prereq_evals = [] if state.prereq_evals.nil? - state.prereq_evals.push(prereq_eval) - rescue => exn - Util.log_exception(@logger, "Error evaluating prerequisite flag \"#{prereq_key}\" for flag \"#{flag[:key]}\"", exn) - prereq_ok = false + eval_result.prereq_evals = [] if eval_result.prereq_evals.nil? 
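The observable effect of this cycle check, sketched with assumed evaluator, flag_a, and context objects: a prerequisite loop such as flag_a -> flag_b -> flag_a no longer recurses; evaluate() converts the raised EvaluationException into a malformed-flag error result:

    result = evaluator.evaluate(flag_a, context)
    result.detail.value              # => nil
    result.detail.reason.error_kind  # => :MALFORMED_FLAG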
+ eval_result.prereq_evals.push(prereq_eval) + end + + unless prereq_ok + return prerequisite.failure_result end end - if !prereq_ok - return EvaluatorHelpers.prerequisite_failed_result(prerequisite, flag) - end + ensure + state.prereq_stack.pop end + nil end - def rule_match_user(rule, user, state) - return false if !rule[:clauses] - - (rule[:clauses] || []).each do |clause| - return false if !clause_match_user(clause, user, state) + # @param rule [LaunchDarkly::Impl::Model::FlagRule] + # @param context [LaunchDarkly::LDContext] + # @param eval_result [EvalResult] + # @param state [EvaluatorState] + # @raise [InvalidReferenceException] + private def rule_match_context(rule, context, eval_result, state) + rule.clauses.each do |clause| + return false unless clause_match_context(clause, context, eval_result, state) end - return true + true end - def clause_match_user(clause, user, state) - # In the case of a segment match operator, we check if the user is in any of the segments, + # @param clause [LaunchDarkly::Impl::Model::Clause] + # @param context [LaunchDarkly::LDContext] + # @param eval_result [EvalResult] + # @param state [EvaluatorState] + # @raise [InvalidReferenceException] + private def clause_match_context(clause, context, eval_result, state) + # In the case of a segment match operator, we check if the context is in any of the segments, # and possibly negate - if clause[:op].to_sym == :segmentMatch - result = (clause[:values] || []).any? { |v| + if clause.op == :segmentMatch + result = clause.values.any? { |v| + if state.segment_stack.include?(v) + raise LaunchDarkly::Impl::EvaluationException.new( + "segment rule referencing segment \"#{v}\" caused a circular reference; this is probably a temporary condition due to an incomplete update" + ) + end + segment = @get_segment.call(v) - !segment.nil? && segment_match_user(segment, user, state) + !segment.nil? && segment_match_context(segment, context, eval_result, state) } - clause[:negate] ? !result : result + clause.negate ? !result : result else - clause_match_user_no_segments(clause, user) + clause_match_context_no_segments(clause, context) end end - def clause_match_user_no_segments(clause, user) - user_val = EvaluatorOperators.user_value(user, clause[:attribute]) - return false if user_val.nil? + # @param clause [LaunchDarkly::Impl::Model::Clause] + # @param context_value [any] + # @return [Boolean] + private def match_any_clause_value(clause, context_value) + op = clause.op + clause.values.any? { |cv| EvaluatorOperators.apply(op, context_value, cv) } + end + + # @param clause [LaunchDarkly::Impl::Model::Clause] + # @param context [LaunchDarkly::LDContext] + # @return [Boolean] + private def clause_match_by_kind(clause, context) + # If attribute is "kind", then we treat operator and values as a match + # expression against a list of all individual kinds in the context. + # That is, for a multi-kind context with kinds of "org" and "user", it + # is a match if either of those strings is a match with Operator and + # Values. - op = clause[:op].to_sym - clause_vals = clause[:values] - result = if user_val.is_a? Enumerable - user_val.any? { |uv| clause_vals.any? { |cv| EvaluatorOperators.apply(op, uv, cv) } } + (0...context.individual_context_count).each do |i| + c = context.individual_context(i) + if !c.nil? 
&& match_any_clause_value(clause, c.kind) + return true + end + end + + false + end + + # @param clause [LaunchDarkly::Impl::Model::Clause] + # @param context [LaunchDarkly::LDContext] + # @return [Boolean] + # @raise [InvalidReferenceException] Raised if the clause.attribute is an invalid reference + private def clause_match_context_no_segments(clause, context) + raise InvalidReferenceException.new(clause.attribute.error) unless clause.attribute.error.nil? + + if clause.attribute.depth == 1 && clause.attribute.component(0) == :kind + result = clause_match_by_kind(clause, context) + return clause.negate ? !result : result + end + + matched_context = context.individual_context(clause.context_kind || LaunchDarkly::LDContext::KIND_DEFAULT) + return false if matched_context.nil? + + context_val = matched_context.get_value_for_reference(clause.attribute) + return false if context_val.nil? + + result = if context_val.is_a? Enumerable + context_val.any? { |uv| match_any_clause_value(clause, uv) } else - clause_vals.any? { |cv| EvaluatorOperators.apply(op, user_val, cv) } + match_any_clause_value(clause, context_val) end - clause[:negate] ? !result : result + clause.negate ? !result : result end - def segment_match_user(segment, user, state) - return false unless user[:key] - segment[:unbounded] ? big_segment_match_user(segment, user, state) : simple_segment_match_user(segment, user, true) + # @param segment [LaunchDarkly::Impl::Model::Segment] + # @param context [LaunchDarkly::LDContext] + # @param eval_result [EvalResult] + # @param state [EvaluatorState] + # @return [Boolean] + private def segment_match_context(segment, context, eval_result, state) + return big_segment_match_context(segment, context, eval_result, state) if segment.unbounded + + simple_segment_match_context(segment, context, true, eval_result, state) end - def big_segment_match_user(segment, user, state) - if !segment[:generation] + # @param segment [LaunchDarkly::Impl::Model::Segment] + # @param context [LaunchDarkly::LDContext] + # @param eval_result [EvalResult] + # @param state [EvaluatorState] + # @return [Boolean] + private def big_segment_match_context(segment, context, eval_result, state) + unless segment.generation # Big segment queries can only be done if the generation is known. If it's unset, # that probably means the data store was populated by an older SDK that doesn't know # about the generation property and therefore dropped it from the JSON data. We'll treat # that as a "not configured" condition. - state.big_segments_status = BigSegmentsStatus::NOT_CONFIGURED + eval_result.big_segments_status = BigSegmentsStatus::NOT_CONFIGURED return false end - if !state.big_segments_status - result = @get_big_segments_membership.nil? ? nil : @get_big_segments_membership.call(user[:key]) + + matched_context = context.individual_context(segment.unbounded_context_kind) + return false if matched_context.nil? + + membership = eval_result.big_segments_membership.nil? ? nil : eval_result.big_segments_membership[matched_context.key] + + if membership.nil? + # Note that this query is just by key; the context kind doesn't matter because any given + # Big Segment can only reference one context kind. So if segment A for the "user" kind + # includes a "user" context with key X, and segment B for the "org" kind includes an "org" + # context with the same key X, it is fine to say that the membership for key X is + # segment A and segment B-- there is no ambiguity. + result = @get_big_segments_membership.nil? ? 
nil : @get_big_segments_membership.call(matched_context.key) if result - state.big_segments_membership = result.membership - state.big_segments_status = result.status + eval_result.big_segments_status = result.status + + membership = result.membership + eval_result.big_segments_membership = {} if eval_result.big_segments_membership.nil? + eval_result.big_segments_membership[matched_context.key] = membership else - state.big_segments_membership = nil - state.big_segments_status = BigSegmentsStatus::NOT_CONFIGURED + eval_result.big_segments_status = BigSegmentsStatus::NOT_CONFIGURED end end - segment_ref = Evaluator.make_big_segment_ref(segment) - membership = state.big_segments_membership - included = membership.nil? ? nil : membership[segment_ref] - return included if !included.nil? - simple_segment_match_user(segment, user, false) + + membership_result = nil + unless membership.nil? + segment_ref = Evaluator.make_big_segment_ref(segment) + membership_result = membership.nil? ? nil : membership[segment_ref] + end + + return membership_result unless membership_result.nil? + simple_segment_match_context(segment, context, false, eval_result, state) end - def simple_segment_match_user(segment, user, use_includes_and_excludes) + # @param segment [LaunchDarkly::Impl::Model::Segment] + # @param context [LaunchDarkly::LDContext] + # @param use_includes_and_excludes [Boolean] + # @param state [EvaluatorState] + # @return [Boolean] + private def simple_segment_match_context(segment, context, use_includes_and_excludes, eval_result, state) if use_includes_and_excludes - return true if segment[:included].include?(user[:key]) - return false if segment[:excluded].include?(user[:key]) + if EvaluatorHelpers.context_key_in_target_list(context, nil, segment.included) + return true + end + + segment.included_contexts.each do |target| + if EvaluatorHelpers.context_key_in_target_list(context, target.context_kind, target.values) + return true + end + end + + if EvaluatorHelpers.context_key_in_target_list(context, nil, segment.excluded) + return false + end + + segment.excluded_contexts.each do |target| + if EvaluatorHelpers.context_key_in_target_list(context, target.context_kind, target.values) + return false + end + end end - (segment[:rules] || []).each do |r| - return true if segment_rule_match_user(r, user, segment[:key], segment[:salt]) + rules = segment.rules + state.segment_stack.push(segment.key) unless rules.empty? + + begin + rules.each do |r| + return true if segment_rule_match_context(r, context, segment.key, segment.salt, eval_result, state) + end + ensure + state.segment_stack.pop end - return false + false end - def segment_rule_match_user(rule, user, segment_key, salt) - (rule[:clauses] || []).each do |c| - return false unless clause_match_user_no_segments(c, user) + # @param rule [LaunchDarkly::Impl::Model::SegmentRule] + # @param context [LaunchDarkly::LDContext] + # @param segment_key [String] + # @param salt [String] + # @return [Boolean] + # @raise [InvalidReferenceException] + private def segment_rule_match_context(rule, context, segment_key, salt, eval_result, state) + rule.clauses.each do |c| + return false unless clause_match_context(c, context, eval_result, state) end # If the weight is absent, this rule matches - return true if !rule[:weight] - + return true unless rule.weight + # All of the clauses are met. See if the user buckets in - bucket = EvaluatorBucketing.bucket_user(user, segment_key, rule[:bucketBy].nil? ? 
"key" : rule[:bucketBy], salt, nil) - weight = rule[:weight].to_f / 100000.0 - return bucket < weight + begin + bucket = EvaluatorBucketing.bucket_context(context, rule.rollout_context_kind, segment_key, rule.bucket_by || "key", salt, nil) + rescue InvalidReferenceException + return false + end + + weight = rule.weight.to_f / 100000.0 + bucket.nil? || bucket < weight end - private - - def get_value_for_variation_or_rollout(flag, vr, user, reason, precomputed_results) - index, in_experiment = EvaluatorBucketing.variation_index_for_user(flag, vr, user) + private def get_value_for_variation_or_rollout(flag, vr, context, precomputed_results) + index, in_experiment = EvaluatorBucketing.variation_index_for_context(flag, vr, context) + if index.nil? - @logger.error("[LDClient] Data inconsistency in feature flag \"#{flag[:key]}\": variation/rollout object with no variation or rollout") + @logger.error("[LDClient] Data inconsistency in feature flag \"#{flag.key}\": variation/rollout object with no variation or rollout") return Evaluator.error_result(EvaluationReason::ERROR_MALFORMED_FLAG) end - if precomputed_results - return precomputed_results.for_variation(index, in_experiment) - else - #if in experiment is true, set reason to a different reason instance/singleton with in_experiment set - if in_experiment - if reason.kind == :FALLTHROUGH - reason = EvaluationReason::fallthrough(in_experiment) - elsif reason.kind == :RULE_MATCH - reason = EvaluationReason::rule_match(reason.rule_index, reason.rule_id, in_experiment) + precomputed_results.for_variation(index, in_experiment) + end + + # @param [LaunchDarkly::LDContext] context + # @param [LaunchDarkly::Impl::Model::FeatureFlag] flag + # @return [LaunchDarkly::EvaluationDetail, nil] + private def check_targets(context, flag) + targets = flag.targets + context_targets = flag.context_targets + + if context_targets.empty? + unless targets.empty? + user_context = context.individual_context(LDContext::KIND_DEFAULT) + return nil if user_context.nil? + + targets.each do |target| + if target.values.include?(user_context.key) # rubocop:disable Performance/InefficientHashSearch + return target.match_result + end + end + end + + return nil + end + + context_targets.each do |target| + if target.kind == LDContext::KIND_DEFAULT + user_context = context.individual_context(LDContext::KIND_DEFAULT) + next if user_context.nil? + + user_key = user_context.key + targets.each do |user_target| + if user_target.variation == target.variation + if user_target.values.include?(user_key) # rubocop:disable Performance/InefficientHashSearch + return target.match_result + end + break + end end + elsif EvaluatorHelpers.context_key_in_target_list(context, target.kind, target.values) + return target.match_result end - return EvaluatorHelpers.evaluation_detail_for_variation(flag, index, reason) end + + nil end end end diff --git a/lib/ldclient-rb/impl/evaluator_bucketing.rb b/lib/ldclient-rb/impl/evaluator_bucketing.rb index 11842f74..13b5d9c6 100644 --- a/lib/ldclient-rb/impl/evaluator_bucketing.rb +++ b/lib/ldclient-rb/impl/evaluator_bucketing.rb @@ -1,4 +1,3 @@ - module LaunchDarkly module Impl # Encapsulates the logic for percentage rollouts. @@ -6,64 +5,64 @@ module EvaluatorBucketing # Applies either a fixed variation or a rollout for a rule (or the fallthrough rule). 
# # @param flag [Object] the feature flag - # @param rule [Object] the rule - # @param user [Object] the user properties - # @return [Number] the variation index, or nil if there is an error - def self.variation_index_for_user(flag, rule, user) - - variation = rule[:variation] - return variation, false if !variation.nil? # fixed variation - rollout = rule[:rollout] + # @param vr [LaunchDarkly::Impl::Model::VariationOrRollout] the variation/rollout properties + # @param context [LaunchDarkly::LDContext] the context properties + # @return [Array<[Number, nil], Boolean>] the variation index, or nil if there is an error + # @raise [InvalidReferenceException] + def self.variation_index_for_context(flag, vr, context) + variation = vr.variation + return variation, false unless variation.nil? # fixed variation + rollout = vr.rollout return nil, false if rollout.nil? - variations = rollout[:variations] + variations = rollout.variations if !variations.nil? && variations.length > 0 # percentage rollout - bucket_by = rollout[:bucketBy].nil? ? "key" : rollout[:bucketBy] + rollout_is_experiment = rollout.is_experiment + bucket_by = rollout_is_experiment ? nil : rollout.bucket_by + bucket_by = 'key' if bucket_by.nil? - seed = rollout[:seed] - bucket = bucket_user(user, flag[:key], bucket_by, flag[:salt], seed) # may not be present - sum = 0; + seed = rollout.seed + bucket = bucket_context(context, rollout.context_kind, flag.key, bucket_by, flag.salt, seed) # may not be present + in_experiment = rollout_is_experiment && !bucket.nil? + sum = 0 variations.each do |variate| - if rollout[:kind] == "experiment" && !variate[:untracked] - in_experiment = true - end - - sum += variate[:weight].to_f / 100000.0 - if bucket < sum - return variate[:variation], !!in_experiment + sum += variate.weight.to_f / 100000.0 + if bucket.nil? || bucket < sum + return variate.variation, in_experiment && !variate.untracked end end - # The user's bucket value was greater than or equal to the end of the last bucket. This could happen due + # The context's bucket value was greater than or equal to the end of the last bucket. This could happen due # to a rounding error, or due to the fact that we are scaling to 100000 rather than 99999, or the flag # data could contain buckets that don't actually add up to 100000. Rather than returning an error in - # this case (or changing the scaling, which would potentially change the results for *all* users), we - # will simply put the user in the last bucket. + # this case (or changing the scaling, which would potentially change the results for *all* contexts), we + # will simply put the context in the last bucket. last_variation = variations[-1] - in_experiment = rollout[:kind] == "experiment" && !last_variation[:untracked] - - [last_variation[:variation], in_experiment] + [last_variation.variation, in_experiment && !last_variation.untracked] else # the rule isn't well-formed [nil, false] end end - # Returns a user's bucket value as a floating-point value in `[0, 1)`. + # Returns a context's bucket value as a floating-point value in `[0, 1)`. 
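For illustration, a minimal standalone sketch (not the SDK's public API; the flag key, salt, weights, and context key below are hypothetical) of the scheme described above: a SHA1-derived bucket value in [0, 1) is walked against variation weights expressed out of 100000.

require "digest/sha1"

# Hash the bucketing inputs to a float in [0, 1).
def example_bucket_value(flag_key, salt, id_hash)
  Digest::SHA1.hexdigest("#{flag_key}.#{salt}.#{id_hash}")[0..14].to_i(16) / Float(0xFFFFFFFFFFFFFFF)
end

# Walk the cumulative weights until the bucket value falls inside one of them.
def example_variation_for_bucket(weighted_variations, bucket)
  sum = 0.0
  weighted_variations.each do |wv|
    sum += wv[:weight].to_f / 100_000.0
    return wv[:variation] if bucket < sum
  end
  weighted_variations[-1][:variation] # rounding errors land in the last bucket
end

bucket = example_bucket_value("my-flag", "abcdef", "user-key-123")
example_variation_for_bucket([{ variation: 0, weight: 60_000 }, { variation: 1, weight: 40_000 }], bucket)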
# - # @param user [Object] the user properties + # @param context [LDContext] the context properties + # @param context_kind [String, nil] the context kind to match against # @param key [String] the feature flag key (or segment key, if this is for a segment rule) - # @param bucket_by [String|Symbol] the name of the user attribute to be used for bucketing + # @param bucket_by [String|Symbol] the name of the context attribute to be used for bucketing # @param salt [String] the feature flag's or segment's salt value - # @return [Number] the bucket value, from 0 inclusive to 1 exclusive - def self.bucket_user(user, key, bucket_by, salt, seed) - return nil unless user[:key] + # @return [Float, nil] the bucket value, from 0 inclusive to 1 exclusive + # @raise [InvalidReferenceException] Raised if the clause.attribute is an invalid reference + def self.bucket_context(context, context_kind, key, bucket_by, salt, seed) + matched_context = context.individual_context(context_kind || LaunchDarkly::LDContext::KIND_DEFAULT) + return nil if matched_context.nil? - id_hash = bucketable_string_value(EvaluatorOperators.user_value(user, bucket_by)) - if id_hash.nil? - return 0.0 - end + reference = (context_kind.nil? || context_kind.empty?) ? Reference.create_literal(bucket_by) : Reference.create(bucket_by) + raise InvalidReferenceException.new(reference.error) unless reference.error.nil? - if user[:secondary] - id_hash += "." + user[:secondary].to_s - end + context_value = matched_context.get_value_for_reference(reference) + return 0.0 if context_value.nil? + + id_hash = bucketable_string_value(context_value) + return 0.0 if id_hash.nil? if seed hash_key = "%d.%s" % [seed, id_hash] @@ -71,7 +70,7 @@ def self.bucket_user(user, key, bucket_by, salt, seed) hash_key = "%s.%s.%s" % [key, salt, id_hash] end - hash_val = (Digest::SHA1.hexdigest(hash_key))[0..14] + hash_val = Digest::SHA1.hexdigest(hash_key)[0..14] hash_val.to_i(16) / Float(0xFFFFFFFFFFFFFFF) end diff --git a/lib/ldclient-rb/impl/evaluator_helpers.rb b/lib/ldclient-rb/impl/evaluator_helpers.rb index 9629a6aa..842d734f 100644 --- a/lib/ldclient-rb/impl/evaluator_helpers.rb +++ b/lib/ldclient-rb/impl/evaluator_helpers.rb @@ -6,48 +6,45 @@ module LaunchDarkly module Impl module EvaluatorHelpers - def self.off_result(flag, logger = nil) - pre = flag[:_preprocessed] - pre ? pre.off_result : evaluation_detail_for_off_variation(flag, EvaluationReason::off, logger) - end - - def self.target_match_result(target, flag, logger = nil) - pre = target[:_preprocessed] - pre ? pre.match_result : evaluation_detail_for_variation( - flag, target[:variation], EvaluationReason::target_match, logger) - end - - def self.prerequisite_failed_result(prereq, flag, logger = nil) - pre = prereq[:_preprocessed] - pre ? pre.failed_result : evaluation_detail_for_off_variation( - flag, EvaluationReason::prerequisite_failed(prereq[:key]), logger - ) - end - - def self.fallthrough_precomputed_results(flag) - pre = flag[:_preprocessed] - pre ? pre.fallthrough_factory : nil - end - - def self.rule_precomputed_results(rule) - pre = rule[:_preprocessed] - pre ? pre.all_match_results : nil - end - + # + # @param flag [LaunchDarkly::Impl::Model::FeatureFlag] + # @param reason [LaunchDarkly::EvaluationReason] + # def self.evaluation_detail_for_off_variation(flag, reason, logger = nil) - index = flag[:offVariation] + index = flag.off_variation index.nil? ? 
EvaluationDetail.new(nil, nil, reason) : evaluation_detail_for_variation(flag, index, reason, logger) end + # + # @param flag [LaunchDarkly::Impl::Model::FeatureFlag] + # @param index [Integer] + # @param reason [LaunchDarkly::EvaluationReason] + # def self.evaluation_detail_for_variation(flag, index, reason, logger = nil) - vars = flag[:variations] || [] + vars = flag.variations if index < 0 || index >= vars.length - logger.error("[LDClient] Data inconsistency in feature flag \"#{flag[:key]}\": invalid variation index") unless logger.nil? + logger.error("[LDClient] Data inconsistency in feature flag \"#{flag.key}\": invalid variation index") unless logger.nil? EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) else EvaluationDetail.new(vars[index], index, reason) end end + + # + # @param context [LaunchDarkly::LDContext] + # @param kind [String, nil] + # @param keys [Enumerable] + # @return [Boolean] + # + def self.context_key_in_target_list(context, kind, keys) + return false unless keys.is_a? Enumerable + return false if keys.empty? + + matched_context = context.individual_context(kind || LaunchDarkly::LDContext::KIND_DEFAULT) + return false if matched_context.nil? + + keys.include? matched_context.key + end end end end diff --git a/lib/ldclient-rb/impl/evaluator_operators.rb b/lib/ldclient-rb/impl/evaluator_operators.rb index e54368e9..574c30e0 100644 --- a/lib/ldclient-rb/impl/evaluator_operators.rb +++ b/lib/ldclient-rb/impl/evaluator_operators.rb @@ -9,24 +9,24 @@ module EvaluatorOperators # Applies an operator to produce a boolean result. # # @param op [Symbol] one of the supported LaunchDarkly operators, as a symbol - # @param user_value the value of the user attribute that is referenced in the current clause (left-hand + # @param context_value the value of the context attribute that is referenced in the current clause (left-hand # side of the expression) - # @param clause_value the constant value that `user_value` is being compared to (right-hand side of the + # @param clause_value the constant value that `context_value` is being compared to (right-hand side of the # expression) # @return [Boolean] true if the expression should be considered a match; false if it is not a match, or # if the values cannot be compared because they are of the wrong types, or if the operator is unknown - def self.apply(op, user_value, clause_value) + def self.apply(op, context_value, clause_value) case op when :in - user_value == clause_value + context_value == clause_value when :startsWith - string_op(user_value, clause_value, lambda { |a, b| a.start_with? b }) + string_op(context_value, clause_value, lambda { |a, b| a.start_with? b }) when :endsWith - string_op(user_value, clause_value, lambda { |a, b| a.end_with? b }) + string_op(context_value, clause_value, lambda { |a, b| a.end_with? b }) when :contains - string_op(user_value, clause_value, lambda { |a, b| a.include? b }) + string_op(context_value, clause_value, lambda { |a, b| a.include? b }) when :matches - string_op(user_value, clause_value, lambda { |a, b| + string_op(context_value, clause_value, lambda { |a, b| begin re = Regexp.new b !re.match(a).nil? 
@@ -35,76 +35,47 @@ def self.apply(op, user_value, clause_value) end }) when :lessThan - numeric_op(user_value, clause_value, lambda { |a, b| a < b }) + numeric_op(context_value, clause_value, lambda { |a, b| a < b }) when :lessThanOrEqual - numeric_op(user_value, clause_value, lambda { |a, b| a <= b }) + numeric_op(context_value, clause_value, lambda { |a, b| a <= b }) when :greaterThan - numeric_op(user_value, clause_value, lambda { |a, b| a > b }) + numeric_op(context_value, clause_value, lambda { |a, b| a > b }) when :greaterThanOrEqual - numeric_op(user_value, clause_value, lambda { |a, b| a >= b }) + numeric_op(context_value, clause_value, lambda { |a, b| a >= b }) when :before - date_op(user_value, clause_value, lambda { |a, b| a < b }) + date_op(context_value, clause_value, lambda { |a, b| a < b }) when :after - date_op(user_value, clause_value, lambda { |a, b| a > b }) + date_op(context_value, clause_value, lambda { |a, b| a > b }) when :semVerEqual - semver_op(user_value, clause_value, lambda { |a, b| a == b }) + semver_op(context_value, clause_value, lambda { |a, b| a == b }) when :semVerLessThan - semver_op(user_value, clause_value, lambda { |a, b| a < b }) + semver_op(context_value, clause_value, lambda { |a, b| a < b }) when :semVerGreaterThan - semver_op(user_value, clause_value, lambda { |a, b| a > b }) + semver_op(context_value, clause_value, lambda { |a, b| a > b }) when :segmentMatch # We should never reach this; it can't be evaluated based on just two parameters, because it requires - # looking up the segment from the data store. Instead, we special-case this operator in clause_match_user. + # looking up the segment from the data store. Instead, we special-case this operator in clause_match_context. false else false end end - # Retrieves the value of a user attribute by name. - # - # Built-in attributes correspond to top-level properties in the user object. They are treated as strings and - # non-string values are coerced to strings, except for `anonymous` which is meant to be a boolean if present - # and is not currently coerced. This behavior is consistent with earlier versions of the Ruby SDK, but is not - # guaranteed to be consistent with other SDKs, since the evaluator specification is based on the strongly-typed - # SDKs where it is not possible for an attribute to have the wrong type. - # - # Custom attributes correspond to properties within the `custom` property, if any, and can be of any type. - # - # @param user [Object] the user properties - # @param attribute [String|Symbol] the attribute to get, for instance `:key` or `:name` or `:some_custom_attr` - # @return the attribute value, or nil if the attribute is unknown - def self.user_value(user, attribute) - attribute = attribute.to_sym - if BUILTINS.include? attribute - value = user[attribute] - return nil if value.nil? - (attribute == :anonymous) ? value : value.to_s - elsif !user[:custom].nil? - user[:custom][attribute] - else - nil - end - end - private - BUILTINS = Set[:key, :secondary, :ip, :country, :email, :firstName, :lastName, :avatar, :name, :anonymous] NUMERIC_VERSION_COMPONENTS_REGEX = Regexp.new("^[0-9.]*") - - private_constant :BUILTINS private_constant :NUMERIC_VERSION_COMPONENTS_REGEX - def self.string_op(user_value, clause_value, fn) - (user_value.is_a? String) && (clause_value.is_a? String) && fn.call(user_value, clause_value) + def self.string_op(context_value, clause_value, fn) + (context_value.is_a? String) && (clause_value.is_a? 
String) && fn.call(context_value, clause_value) end - def self.numeric_op(user_value, clause_value, fn) - (user_value.is_a? Numeric) && (clause_value.is_a? Numeric) && fn.call(user_value, clause_value) + def self.numeric_op(context_value, clause_value, fn) + (context_value.is_a? Numeric) && (clause_value.is_a? Numeric) && fn.call(context_value, clause_value) end - def self.date_op(user_value, clause_value, fn) - ud = to_date(user_value) + def self.date_op(context_value, clause_value, fn) + ud = to_date(context_value) if !ud.nil? cd = to_date(clause_value) !cd.nil? && fn.call(ud, cd) @@ -113,8 +84,8 @@ def self.date_op(user_value, clause_value, fn) end end - def self.semver_op(user_value, clause_value, fn) - uv = to_semver(user_value) + def self.semver_op(context_value, clause_value, fn) + uv = to_semver(context_value) if !uv.nil? cv = to_semver(clause_value) !cv.nil? && fn.call(uv, cv) diff --git a/lib/ldclient-rb/impl/event_sender.rb b/lib/ldclient-rb/impl/event_sender.rb index 442af033..76395a1c 100644 --- a/lib/ldclient-rb/impl/event_sender.rb +++ b/lib/ldclient-rb/impl/event_sender.rb @@ -8,7 +8,7 @@ module Impl EventSenderResult = Struct.new(:success, :must_shutdown, :time_from_server) class EventSender - CURRENT_SCHEMA_VERSION = 3 + CURRENT_SCHEMA_VERSION = 4 DEFAULT_RETRY_INTERVAL = 1 def initialize(sdk_key, config, http_client = nil, retry_interval = DEFAULT_RETRY_INTERVAL) @@ -33,7 +33,7 @@ def send_event_data(event_data, description, is_diagnostic) begin http_client = @http_client_pool.acquire() response = nil - (0..1).each do |attempt| + 2.times do |attempt| if attempt > 0 @logger.warn { "[LDClient] Will retry posting events after #{@retry_interval} second" } sleep(@retry_interval) @@ -43,13 +43,13 @@ def send_event_data(event_data, description, is_diagnostic) headers = {} headers["content-type"] = "application/json" Impl::Util.default_http_headers(@sdk_key, @config).each { |k, v| headers[k] = v } - if !is_diagnostic + unless is_diagnostic headers["X-LaunchDarkly-Event-Schema"] = CURRENT_SCHEMA_VERSION.to_s headers["X-LaunchDarkly-Payload-ID"] = payload_id end response = http_client.request("POST", uri, { headers: headers, - body: event_data + body: event_data, }) rescue StandardError => exn @logger.warn { "[LDClient] Error sending events: #{exn.inspect}." } @@ -60,7 +60,7 @@ def send_event_data(event_data, description, is_diagnostic) body = response.to_s if status >= 200 && status < 300 res_time = nil - if !response.headers["date"].nil? + unless response.headers["date"].nil? 
begin res_time = Time.httpdate(response.headers["date"]) rescue ArgumentError @@ -77,7 +77,7 @@ def send_event_data(event_data, description, is_diagnostic) end end # used up our retries - return EventSenderResult.new(false, false, nil) + EventSenderResult.new(false, false, nil) ensure @http_client_pool.release(http_client) end diff --git a/lib/ldclient-rb/impl/event_summarizer.rb b/lib/ldclient-rb/impl/event_summarizer.rb index 5c9dcc1a..4109f80a 100644 --- a/lib/ldclient-rb/impl/event_summarizer.rb +++ b/lib/ldclient-rb/impl/event_summarizer.rb @@ -1,15 +1,16 @@ require "ldclient-rb/impl/event_types" +require "set" module LaunchDarkly module Impl EventSummary = Struct.new(:start_date, :end_date, :counters) - EventSummaryFlagInfo = Struct.new(:default, :versions) + EventSummaryFlagInfo = Struct.new(:default, :versions, :context_kinds) EventSummaryFlagVariationCounter = Struct.new(:value, :count) # Manages the state of summarizable information for the EventProcessor, including the - # event counters and user deduplication. Note that the methods of this class are + # event counters and context deduplication. Note that the methods of this class are # deliberately not thread-safe; the EventProcessor is responsible for enforcing # synchronization across both the summarizer and the event queue. class EventSummarizer @@ -22,26 +23,31 @@ def initialize # Adds this event to our counters, if it is a type of event we need to count. def summarize_event(event) - return if !event.is_a?(LaunchDarkly::Impl::EvalEvent) + return unless event.is_a?(LaunchDarkly::Impl::EvalEvent) counters_for_flag = @counters[event.key] if counters_for_flag.nil? - counters_for_flag = EventSummaryFlagInfo.new(event.default, Hash.new) + counters_for_flag = EventSummaryFlagInfo.new(event.default, Hash.new, Set.new) @counters[event.key] = counters_for_flag end + counters_for_flag_version = counters_for_flag.versions[event.version] if counters_for_flag_version.nil? counters_for_flag_version = Hash.new counters_for_flag.versions[event.version] = counters_for_flag_version end + + counters_for_flag.context_kinds.merge(event.context.kinds) + variation_counter = counters_for_flag_version[event.variation] if variation_counter.nil? counters_for_flag_version[event.variation] = EventSummaryFlagVariationCounter.new(event.value, 1) else variation_counter.count = variation_counter.count + 1 end + time = event.timestamp - if !time.nil? + unless time.nil? @start_date = time if @start_date == 0 || time < @start_date @end_date = time if time > @end_date end @@ -49,8 +55,7 @@ def summarize_event(event) # Returns a snapshot of the current summarized event data, and resets this state. 
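To make the accumulated shape concrete, a summary for one flag evaluated twice would snapshot to roughly the following (hypothetical flag key, version, variation index, values, and millisecond timestamps; the structs are the ones defined above):

require "set"

LaunchDarkly::Impl::EventSummary.new(
  1_672_531_200_000,  # start_date: timestamp of the earliest counted event
  1_672_531_205_000,  # end_date: timestamp of the latest counted event
  {
    "my-flag" => LaunchDarkly::Impl::EventSummaryFlagInfo.new(
      false,                                                                               # default value requested by the caller
      { 42 => { 0 => LaunchDarkly::Impl::EventSummaryFlagVariationCounter.new(true, 2) } }, # flag version => variation index => counter
      Set.new(["user"])                                                                    # context kinds seen for this flag
    ),
  }
)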
def snapshot - ret = EventSummary.new(@start_date, @end_date, @counters) - ret + EventSummary.new(@start_date, @end_date, @counters) end def clear diff --git a/lib/ldclient-rb/impl/event_types.rb b/lib/ldclient-rb/impl/event_types.rb index 6ca043ba..1be03eb8 100644 --- a/lib/ldclient-rb/impl/event_types.rb +++ b/lib/ldclient-rb/impl/event_types.rb @@ -1,20 +1,23 @@ module LaunchDarkly module Impl class Event - def initialize(timestamp, user) + # @param timestamp [Integer] + # @param context [LaunchDarkly::LDContext] + def initialize(timestamp, context) @timestamp = timestamp - @user = user + @context = context end + # @return [Integer] attr_reader :timestamp - attr_reader :kind - attr_reader :user + # @return [LaunchDarkly::LDContext] + attr_reader :context end class EvalEvent < Event - def initialize(timestamp, user, key, version = nil, variation = nil, value = nil, reason = nil, default = nil, + def initialize(timestamp, context, key, version = nil, variation = nil, value = nil, reason = nil, default = nil, track_events = false, debug_until = nil, prereq_of = nil) - super(timestamp, user) + super(timestamp, context) @key = key @version = version @variation = variation @@ -39,17 +42,17 @@ def initialize(timestamp, user, key, version = nil, variation = nil, value = nil end class IdentifyEvent < Event - def initialize(timestamp, user) - super(timestamp, user) + def initialize(timestamp, context) + super(timestamp, context) end end class CustomEvent < Event - def initialize(timestamp, user, key, data = nil, metric_value = nil) - super(timestamp, user) + def initialize(timestamp, context, key, data = nil, metric_value = nil) + super(timestamp, context) @key = key - @data = data if !data.nil? - @metric_value = metric_value if !metric_value.nil? + @data = data unless data.nil? + @metric_value = metric_value unless metric_value.nil? end attr_reader :key @@ -57,30 +60,15 @@ def initialize(timestamp, user, key, data = nil, metric_value = nil) attr_reader :metric_value end - class AliasEvent < Event - def initialize(timestamp, key, context_kind, previous_key, previous_context_kind) - super(timestamp, nil) - @key = key - @context_kind = context_kind - @previous_key = previous_key - @previous_context_kind = previous_context_kind - end - - attr_reader :key - attr_reader :context_kind - attr_reader :previous_key - attr_reader :previous_context_kind - end - class IndexEvent < Event - def initialize(timestamp, user) - super(timestamp, user) + def initialize(timestamp, context) + super(timestamp, context) end end class DebugEvent < Event def initialize(eval_event) - super(eval_event.timestamp, eval_event.user) + super(eval_event.timestamp, eval_event.context) @eval_event = eval_event end diff --git a/lib/ldclient-rb/impl/integrations/consul_impl.rb b/lib/ldclient-rb/impl/integrations/consul_impl.rb index 2f186dab..f5043fb9 100644 --- a/lib/ldclient-rb/impl/integrations/consul_impl.rb +++ b/lib/ldclient-rb/impl/integrations/consul_impl.rb @@ -16,14 +16,14 @@ class ConsulFeatureStoreCore end def initialize(opts) - if !CONSUL_ENABLED + unless CONSUL_ENABLED raise RuntimeError.new("can't use Consul feature store without the 'diplomat' gem") end @prefix = (opts[:prefix] || LaunchDarkly::Integrations::Consul.default_prefix) + '/' @logger = opts[:logger] || Config.default_logger - Diplomat.configuration = opts[:consul_config] if !opts[:consul_config].nil? - Diplomat.configuration.url = opts[:url] if !opts[:url].nil? + Diplomat.configuration = opts[:consul_config] unless opts[:consul_config].nil? 
+ Diplomat.configuration.url = opts[:url] unless opts[:url].nil? @logger.info("ConsulFeatureStore: using Consul host at #{Diplomat.configuration.url}") end @@ -51,10 +51,10 @@ def init_internal(all_data) unused_old_keys.each do |key| ops.push({ 'KV' => { 'Verb' => 'delete', 'Key' => key } }) end - + # Now set the special key that we check in initialized_internal? ops.push({ 'KV' => { 'Verb' => 'set', 'Key' => inited_key, 'Value' => '' } }) - + ConsulUtil.batch_operations(ops) @logger.info { "Initialized database with #{num_items} items" } @@ -70,7 +70,7 @@ def get_all_internal(kind) results = Diplomat::Kv.get(kind_key(kind), { recurse: true }, :return) (results == "" ? [] : results).each do |result| value = result[:value] - if !value.nil? + unless value.nil? item = Model.deserialize(kind, value) items_out[item[:key].to_sym] = item end @@ -132,7 +132,7 @@ def item_key(kind, key) def kind_key(kind) @prefix + kind[:namespace] + '/' end - + def inited_key @prefix + '$inited' end diff --git a/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb b/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb index 7244fc9b..0995b36b 100644 --- a/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb +++ b/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb @@ -16,28 +16,28 @@ class DynamoDBStoreImplBase AWS_SDK_ENABLED = false end end - + PARTITION_KEY = "namespace" SORT_KEY = "key" def initialize(table_name, opts) - if !AWS_SDK_ENABLED + unless AWS_SDK_ENABLED raise RuntimeError.new("can't use #{description} without the aws-sdk or aws-sdk-dynamodb gem") end - + @table_name = table_name @prefix = opts[:prefix] ? (opts[:prefix] + ":") : "" @logger = opts[:logger] || Config.default_logger - + if !opts[:existing_client].nil? @client = opts[:existing_client] else @client = Aws::DynamoDB::Client.new(opts[:dynamodb_opts] || {}) end - + @logger.info("#{description}: using DynamoDB table \"#{table_name}\"") end - + def stop # AWS client doesn't seem to have a close method end @@ -46,7 +46,7 @@ def stop "DynamoDB" end end - + # # Internal implementation of the DynamoDB feature store, intended to be used with CachingStoreWrapper. # @@ -83,7 +83,7 @@ def init_internal(all_data) del_item = make_keys_hash(tuple[0], tuple[1]) requests.push({ delete_request: { key: del_item } }) end - + # Now set the special key that we check in initialized_internal? 
inited_item = make_keys_hash(inited_key, inited_key) requests.push({ put_request: { item: inited_item } }) @@ -123,11 +123,11 @@ def upsert_internal(kind, new_item) expression_attribute_names: { "#namespace" => PARTITION_KEY, "#key" => SORT_KEY, - "#version" => VERSION_ATTRIBUTE + "#version" => VERSION_ATTRIBUTE, }, expression_attribute_values: { - ":version" => new_item[:version] - } + ":version" => new_item[:version], + }, }) new_item rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException @@ -159,7 +159,7 @@ def inited_key def make_keys_hash(namespace, key) { PARTITION_KEY => namespace, - SORT_KEY => key + SORT_KEY => key, } end @@ -170,16 +170,16 @@ def make_query_for_kind(kind) key_conditions: { PARTITION_KEY => { comparison_operator: "EQ", - attribute_value_list: [ namespace_for_kind(kind) ] - } - } + attribute_value_list: [ namespace_for_kind(kind) ], + }, + }, } end def get_item_by_keys(namespace, key) @client.get_item({ table_name: @table_name, - key: make_keys_hash(namespace, key) + key: make_keys_hash(namespace, key), }) end @@ -190,8 +190,8 @@ def read_existing_keys(kinds) projection_expression: "#namespace, #key", expression_attribute_names: { "#namespace" => PARTITION_KEY, - "#key" => SORT_KEY - } + "#key" => SORT_KEY, + }, }) while true resp = @client.query(req) @@ -210,7 +210,7 @@ def read_existing_keys(kinds) def marshal_item(kind, item) make_keys_hash(namespace_for_kind(kind), item[:key]).merge({ VERSION_ATTRIBUTE => item[:version], - ITEM_JSON_ATTRIBUTE => Model.serialize(kind, item) + ITEM_JSON_ATTRIBUTE => Model.serialize(kind, item), }) end @@ -223,11 +223,11 @@ def unmarshal_item(kind, item) end class DynamoDBBigSegmentStore < DynamoDBStoreImplBase - KEY_METADATA = 'big_segments_metadata'; - KEY_USER_DATA = 'big_segments_user'; - ATTR_SYNC_TIME = 'synchronizedOn'; - ATTR_INCLUDED = 'included'; - ATTR_EXCLUDED = 'excluded'; + KEY_METADATA = 'big_segments_metadata' + KEY_CONTEXT_DATA = 'big_segments_user' + ATTR_SYNC_TIME = 'synchronizedOn' + ATTR_INCLUDED = 'included' + ATTR_EXCLUDED = 'excluded' def initialize(table_name, opts) super(table_name, opts) @@ -243,7 +243,7 @@ def get_metadata table_name: @table_name, key: { PARTITION_KEY => key, - SORT_KEY => key + SORT_KEY => key, } ) timestamp = data.item && data.item[ATTR_SYNC_TIME] ? @@ -251,14 +251,14 @@ def get_metadata LaunchDarkly::Interfaces::BigSegmentStoreMetadata.new(timestamp) end - def get_membership(user_hash) + def get_membership(context_hash) data = @client.get_item( table_name: @table_name, key: { - PARTITION_KEY => @prefix + KEY_USER_DATA, - SORT_KEY => user_hash + PARTITION_KEY => @prefix + KEY_CONTEXT_DATA, + SORT_KEY => context_hash, }) - return nil if !data.item + return nil unless data.item excluded_refs = data.item[ATTR_EXCLUDED] || [] included_refs = data.item[ATTR_INCLUDED] || [] if excluded_refs.empty? && included_refs.empty? diff --git a/lib/ldclient-rb/impl/integrations/file_data_source.rb b/lib/ldclient-rb/impl/integrations/file_data_source.rb index d89e4e95..d8f22745 100644 --- a/lib/ldclient-rb/impl/integrations/file_data_source.rb +++ b/lib/ldclient-rb/impl/integrations/file_data_source.rb @@ -48,7 +48,7 @@ def initialized? def start ready = Concurrent::Event.new - + # We will return immediately regardless of whether the file load succeeded or failed - # the difference can be detected by checking "initialized?" ready.set @@ -63,9 +63,9 @@ def start ready end - + def stop - @listener.stop if !@listener.nil? + @listener.stop unless @listener.nil? 
end private @@ -73,7 +73,7 @@ def stop def load_all all_data = { FEATURES => {}, - SEGMENTS => {} + SEGMENTS => {}, } @paths.each do |path| begin @@ -121,12 +121,12 @@ def symbolize_all_keys(value) def add_item(all_data, kind, item) items = all_data[kind] - raise ArgumentError, "Received unknown item kind #{kind} in add_data" if items.nil? # shouldn't be possible since we preinitialize the hash + raise ArgumentError, "Received unknown item kind #{kind[:namespace]} in add_data" if items.nil? # shouldn't be possible since we preinitialize the hash key = item[:key].to_sym - if !items[key].nil? + unless items[key].nil? raise ArgumentError, "#{kind[:namespace]} key \"#{item[:key]}\" was used more than once" end - items[key] = item + items[key] = Model.deserialize(kind, item) end def make_flag_with_value(key, value) @@ -134,7 +134,7 @@ def make_flag_with_value(key, value) key: key, on: true, fallthrough: { variation: 0 }, - variations: [ value ] + variations: [ value ], } end diff --git a/lib/ldclient-rb/impl/integrations/redis_impl.rb b/lib/ldclient-rb/impl/integrations/redis_impl.rb index 193a50da..14739e97 100644 --- a/lib/ldclient-rb/impl/integrations/redis_impl.rb +++ b/lib/ldclient-rb/impl/integrations/redis_impl.rb @@ -5,6 +5,87 @@ module LaunchDarkly module Impl module Integrations module Redis + # + # An implementation of the LaunchDarkly client's feature store that uses a Redis + # instance. This object holds feature flags and related data received from the + # streaming API. Feature data can also be further cached in memory to reduce overhead + # of calls to Redis. + # + # To use this class, you must first have the `redis` and `connection-pool` gems + # installed. Then, create an instance and store it in the `feature_store` property + # of your client configuration. + # + class RedisFeatureStore + include LaunchDarkly::Interfaces::FeatureStore + + # Note that this class is now just a facade around CachingStoreWrapper, which is in turn delegating + # to RedisFeatureStoreCore where the actual database logic is. This class was retained for historical + # reasons, so that existing code can still call RedisFeatureStore.new. In the future, we will migrate + # away from exposing these concrete classes and use factory methods instead. + + # + # Constructor for a RedisFeatureStore instance. + # + # @param opts [Hash] the configuration options + # @option opts [String] :redis_url URL of the Redis instance (shortcut for omitting redis_opts) + # @option opts [Hash] :redis_opts options to pass to the Redis constructor (if you want to specify more than just redis_url) + # @option opts [String] :prefix namespace prefix to add to all hash keys used by LaunchDarkly + # @option opts [Logger] :logger a `Logger` instance; defaults to `Config.default_logger` + # @option opts [Integer] :max_connections size of the Redis connection pool + # @option opts [Integer] :expiration expiration time for the in-memory cache, in seconds; 0 for no local caching + # @option opts [Integer] :capacity maximum number of feature flags (or related objects) to cache locally + # @option opts [Object] :pool custom connection pool, if desired + # @option opts [Boolean] :pool_shutdown_on_close whether calling `close` should shutdown the custom connection pool. 
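A usage sketch of the options listed above (the URL, prefix, and sizes are hypothetical); the factory method in ldclient-rb/integrations/redis.rb is the intended entry point rather than constructing this class directly:

require "ldclient-rb"

store = LaunchDarkly::Integrations::Redis.new_feature_store(
  redis_url: "redis://localhost:6379/0",
  prefix: "launchdarkly",
  max_connections: 16,
  expiration: 30  # seconds of in-memory caching; 0 disables the local cache
)
config = LaunchDarkly::Config.new(feature_store: store)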
+ # + def initialize(opts = {}) + core = RedisFeatureStoreCore.new(opts) + @wrapper = LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts) + end + + # + # Default value for the `redis_url` constructor parameter; points to an instance of Redis + # running at `localhost` with its default port. + # + def self.default_redis_url + LaunchDarkly::Integrations::Redis::default_redis_url + end + + # + # Default value for the `prefix` constructor parameter. + # + def self.default_prefix + LaunchDarkly::Integrations::Redis::default_prefix + end + + def get(kind, key) + @wrapper.get(kind, key) + end + + def all(kind) + @wrapper.all(kind) + end + + def delete(kind, key, version) + @wrapper.delete(kind, key, version) + end + + def init(all_data) + @wrapper.init(all_data) + end + + def upsert(kind, item) + @wrapper.upsert(kind, item) + end + + def initialized? + @wrapper.initialized? + end + + def stop + @wrapper.stop + end + end + class RedisStoreImplBase begin require "redis" @@ -15,7 +96,7 @@ class RedisStoreImplBase end def initialize(opts) - if !REDIS_ENABLED + unless REDIS_ENABLED raise RuntimeError.new("can't use #{description} because one of these gems is missing: redis, connection_pool") end @@ -28,7 +109,7 @@ def initialize(opts) @logger = opts[:logger] || Config.default_logger @test_hook = opts[:test_hook] # used for unit tests, deliberately undocumented - @stopped = Concurrent::AtomicBoolean.new(false) + @stopped = Concurrent::AtomicBoolean.new() with_connection do |redis| @logger.info("#{description}: using Redis instance at #{redis.connection[:host]}:#{redis.connection[:port]} and prefix: #{@prefix}") @@ -55,13 +136,11 @@ def stop if opts[:redis_url] redis_opts[:url] = opts[:redis_url] end - if !redis_opts.include?(:url) + unless redis_opts.include?(:url) redis_opts[:url] = LaunchDarkly::Integrations::Redis::default_redis_url end max_connections = opts[:max_connections] || 16 - return opts[:pool] || ConnectionPool.new(size: max_connections) do - ::Redis.new(redis_opts) - end + opts[:pool] || ConnectionPool.new(size: max_connections) { ::Redis.new(redis_opts) } end end @@ -135,6 +214,7 @@ def upsert_internal(kind, new_item) else final_item = old_item action = new_item[:deleted] ? "delete" : "update" + # rubocop:disable Layout/LineLength @logger.warn { "RedisFeatureStore: attempted to #{action} #{key} version: #{old_item[:version]} in '#{kind[:namespace]}' with a version that is the same or older: #{new_item[:version]}" } end redis.unwatch @@ -151,7 +231,7 @@ def initialized_internal? private def before_update_transaction(base_key, key) - @test_hook.before_update_transaction(base_key, key) if !@test_hook.nil? + @test_hook.before_update_transaction(base_key, key) unless @test_hook.nil? end def items_key(kind) @@ -176,8 +256,8 @@ def get_redis(redis, kind, key) # class RedisBigSegmentStore < RedisStoreImplBase KEY_LAST_UP_TO_DATE = ':big_segments_synchronized_on' - KEY_USER_INCLUDE = ':big_segment_include:' - KEY_USER_EXCLUDE = ':big_segment_exclude:' + KEY_CONTEXT_INCLUDE = ':big_segment_include:' + KEY_CONTEXT_EXCLUDE = ':big_segment_exclude:' def description "RedisBigSegmentStore" @@ -188,10 +268,10 @@ def get_metadata Interfaces::BigSegmentStoreMetadata.new(value.nil? ? 
nil : value.to_i) end - def get_membership(user_hash) + def get_membership(context_hash) with_connection do |redis| - included_refs = redis.smembers(@prefix + KEY_USER_INCLUDE + user_hash) - excluded_refs = redis.smembers(@prefix + KEY_USER_EXCLUDE + user_hash) + included_refs = redis.smembers(@prefix + KEY_CONTEXT_INCLUDE + context_hash) + excluded_refs = redis.smembers(@prefix + KEY_CONTEXT_EXCLUDE + context_hash) if !included_refs && !excluded_refs nil else diff --git a/lib/ldclient-rb/impl/model/clause.rb b/lib/ldclient-rb/impl/model/clause.rb new file mode 100644 index 00000000..0227dc30 --- /dev/null +++ b/lib/ldclient-rb/impl/model/clause.rb @@ -0,0 +1,39 @@ + +# See serialization.rb for implementation notes on the data model classes. + +module LaunchDarkly + module Impl + module Model + class Clause + def initialize(data, logger) + @data = data + @context_kind = data[:contextKind] + @attribute = (@context_kind.nil? || @context_kind.empty?) ? Reference.create_literal(data[:attribute]) : Reference.create(data[:attribute]) + unless logger.nil? || @attribute.error.nil? + logger.error("[LDClient] Data inconsistency in feature flag: #{@attribute.error}") + end + @op = data[:op].to_sym + @values = data[:values] || [] + @negate = !!data[:negate] + end + + # @return [Hash] + attr_reader :data + # @return [String|nil] + attr_reader :context_kind + # @return [LaunchDarkly::Reference] + attr_reader :attribute + # @return [Symbol] + attr_reader :op + # @return [Array] + attr_reader :values + # @return [Boolean] + attr_reader :negate + + def as_json + @data + end + end + end + end +end diff --git a/lib/ldclient-rb/impl/model/feature_flag.rb b/lib/ldclient-rb/impl/model/feature_flag.rb new file mode 100644 index 00000000..2f89905c --- /dev/null +++ b/lib/ldclient-rb/impl/model/feature_flag.rb @@ -0,0 +1,213 @@ +require "ldclient-rb/impl/evaluator_helpers" +require "ldclient-rb/impl/model/clause" +require "set" + +# See serialization.rb for implementation notes on the data model classes. 
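The FeatureFlag model that follows is built from the parsed flag JSON, much like the Clause model above; as a hypothetical, trimmed example of the hash shape its constructor reads:

flag_data = {
  key: "my-flag",
  version: 1,
  on: true,
  salt: "abcdef",
  variations: [true, false],
  offVariation: 1,                                        # index into variations served when the flag is off
  fallthrough: { variation: 0 },                          # or { rollout: { variations: [...] } }
  prerequisites: [],
  targets: [{ values: ["user-key-123"], variation: 0 }],
  contextTargets: [],
  rules: [],
}
# Per the serialization notes, code outside Impl::Model should obtain the model via
# LaunchDarkly::Impl::Model.deserialize(FEATURES, flag_data) rather than calling FeatureFlag.new directly.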
+ +module LaunchDarkly + module Impl + module Model + class FeatureFlag + # @param data [Hash] + # @param logger [Logger|nil] + def initialize(data, logger = nil) + raise ArgumentError, "expected hash but got #{data.class}" unless data.is_a?(Hash) + @data = data + @key = data[:key] + @version = data[:version] + @deleted = !!data[:deleted] + return if @deleted + @variations = data[:variations] || [] + @on = !!data[:on] + fallthrough = data[:fallthrough] || {} + @fallthrough = VariationOrRollout.new(fallthrough[:variation], fallthrough[:rollout]) + @off_variation = data[:offVariation] + @prerequisites = (data[:prerequisites] || []).map do |prereq_data| + Prerequisite.new(prereq_data, self, logger) + end + @targets = (data[:targets] || []).map do |target_data| + Target.new(target_data, self, logger) + end + @context_targets = (data[:contextTargets] || []).map do |target_data| + Target.new(target_data, self, logger) + end + @rules = (data[:rules] || []).map.with_index do |rule_data, index| + FlagRule.new(rule_data, index, self, logger) + end + @salt = data[:salt] + @off_result = EvaluatorHelpers.evaluation_detail_for_off_variation(self, EvaluationReason::off, logger) + @fallthrough_results = Preprocessor.precompute_multi_variation_results(self, + EvaluationReason::fallthrough(false), EvaluationReason::fallthrough(true)) + end + + # @return [Hash] + attr_reader :data + # @return [String] + attr_reader :key + # @return [Integer] + attr_reader :version + # @return [Boolean] + attr_reader :deleted + # @return [Array] + attr_reader :variations + # @return [Boolean] + attr_reader :on + # @return [Integer|nil] + attr_reader :off_variation + # @return [LaunchDarkly::Impl::Model::VariationOrRollout] + attr_reader :fallthrough + # @return [LaunchDarkly::EvaluationDetail] + attr_reader :off_result + # @return [LaunchDarkly::Impl::Model::EvalResultFactoryMultiVariations] + attr_reader :fallthrough_results + # @return [Array] + attr_reader :prerequisites + # @return [Array] + attr_reader :targets + # @return [Array] + attr_reader :context_targets + # @return [Array] + attr_reader :rules + # @return [String] + attr_reader :salt + + # This method allows us to read properties of the object as if it's just a hash. Currently this is + # necessary because some data store logic is still written to expect hashes; we can remove it once + # we migrate entirely to using attributes of the class. + def [](key) + @data[key] + end + + def ==(other) + other.is_a?(FeatureFlag) && other.data == self.data + end + + def as_json(*) # parameter is unused, but may be passed if we're using the json gem + @data + end + + # Same as as_json, but converts the JSON structure into a string. 
+ def to_json(*a) + as_json.to_json(a) + end + end + + class Prerequisite + def initialize(data, flag, logger) + @data = data + @key = data[:key] + @variation = data[:variation] + @failure_result = EvaluatorHelpers.evaluation_detail_for_off_variation(flag, + EvaluationReason::prerequisite_failed(@key), logger) + end + + # @return [Hash] + attr_reader :data + # @return [String] + attr_reader :key + # @return [Integer] + attr_reader :variation + # @return [LaunchDarkly::EvaluationDetail] + attr_reader :failure_result + end + + class Target + def initialize(data, flag, logger) + @kind = data[:contextKind] || LDContext::KIND_DEFAULT + @data = data + @values = Set.new(data[:values] || []) + @variation = data[:variation] + @match_result = EvaluatorHelpers.evaluation_detail_for_variation(flag, + data[:variation], EvaluationReason::target_match, logger) + end + + # @return [String] + attr_reader :kind + # @return [Hash] + attr_reader :data + # @return [Set] + attr_reader :values + # @return [Integer] + attr_reader :variation + # @return [LaunchDarkly::EvaluationDetail] + attr_reader :match_result + end + + class FlagRule + def initialize(data, rule_index, flag, logger) + @data = data + @clauses = (data[:clauses] || []).map do |clause_data| + Clause.new(clause_data, logger) + end + @variation_or_rollout = VariationOrRollout.new(data[:variation], data[:rollout]) + rule_id = data[:id] + match_reason = EvaluationReason::rule_match(rule_index, rule_id) + match_reason_in_experiment = EvaluationReason::rule_match(rule_index, rule_id, true) + @match_results = Preprocessor.precompute_multi_variation_results(flag, match_reason, match_reason_in_experiment) + end + + # @return [Hash] + attr_reader :data + # @return [Array] + attr_reader :clauses + # @return [LaunchDarkly::Impl::Model::EvalResultFactoryMultiVariations] + attr_reader :match_results + # @return [LaunchDarkly::Impl::Model::VariationOrRollout] + attr_reader :variation_or_rollout + end + + class VariationOrRollout + def initialize(variation, rollout_data) + @variation = variation + @rollout = rollout_data.nil? ? 
nil : Rollout.new(rollout_data) + end + + # @return [Integer|nil] + attr_reader :variation + # @return [Rollout|nil] currently we do not have a model class for the rollout + attr_reader :rollout + end + + class Rollout + def initialize(data) + @context_kind = data[:contextKind] + @variations = (data[:variations] || []).map { |v| WeightedVariation.new(v) } + @bucket_by = data[:bucketBy] + @kind = data[:kind] + @is_experiment = @kind == "experiment" + @seed = data[:seed] + end + + # @return [String|nil] + attr_reader :context_kind + # @return [Array] + attr_reader :variations + # @return [String|nil] + attr_reader :bucket_by + # @return [String|nil] + attr_reader :kind + # @return [Boolean] + attr_reader :is_experiment + # @return [Integer|nil] + attr_reader :seed + end + + class WeightedVariation + def initialize(data) + @variation = data[:variation] + @weight = data[:weight] + @untracked = !!data[:untracked] + end + + # @return [Integer] + attr_reader :variation + # @return [Integer] + attr_reader :weight + # @return [Boolean] + attr_reader :untracked + end + + # Clause is defined in its own file because clauses are used by both flags and segments + end + end +end diff --git a/lib/ldclient-rb/impl/model/preprocessed_data.rb b/lib/ldclient-rb/impl/model/preprocessed_data.rb index 3118ddba..088add02 100644 --- a/lib/ldclient-rb/impl/model/preprocessed_data.rb +++ b/lib/ldclient-rb/impl/model/preprocessed_data.rb @@ -2,7 +2,7 @@ module LaunchDarkly module Impl - module DataModelPreprocessing + module Model # # Container for a precomputed result that includes a specific variation index and value, an # evaluation reason, and optionally an alternate evaluation reason that corresponds to the @@ -18,7 +18,7 @@ def initialize(value, variation_index, regular_reason, in_experiment_reason = ni # @param in_experiment [Boolean] indicates whether we want the result to include # "inExperiment: true" in the reason or not - # @return [EvaluationDetail] + # @return [LaunchDarkly::EvaluationDetail] def get_result(in_experiment = false) in_experiment ? @in_experiment_result : @regular_result end @@ -35,135 +35,22 @@ def initialize(variation_factories) # @param index [Integer] the variation index # @param in_experiment [Boolean] indicates whether we want the result to include # "inExperiment: true" in the reason or not + # @return [LaunchDarkly::EvaluationDetail] def for_variation(index, in_experiment) if index < 0 || index >= @factories.length EvaluationDetail.new(nil, nil, EvaluationReason.error(EvaluationReason::ERROR_MALFORMED_FLAG)) else @factories[index].get_result(in_experiment) end - end - end - - # Base class for all of the preprocessed data classes we embed in our data model. Using this class - # ensures that none of its properties will be included in JSON representations. It also overrides - # == to say that it is always equal with another instance of the same class; equality tests on - # this class are only ever done in test code, and we want the contents of these classes to be - # ignored in test code unless we are looking at specific attributes. 
- class PreprocessedDataBase - def as_json(*) - nil - end - - def to_json(*a) - "null" - end - - def ==(other) - other.class == self.class - end - end - - class FlagPreprocessed < PreprocessedDataBase - def initialize(off_result, fallthrough_factory) - @off_result = off_result - @fallthrough_factory = fallthrough_factory - end - - # @return [EvalResultsForSingleVariation] - attr_reader :off_result - # @return [EvalResultFactoryMultiVariations] - attr_reader :fallthrough_factory - end - - class PrerequisitePreprocessed < PreprocessedDataBase - def initialize(failed_result) - @failed_result = failed_result - end - - # @return [EvalResultsForSingleVariation] - attr_reader :failed_result - end - - class TargetPreprocessed < PreprocessedDataBase - def initialize(match_result) - @match_result = match_result end - - # @return [EvalResultsForSingleVariation] - attr_reader :match_result - end - - class FlagRulePreprocessed < PreprocessedDataBase - def initialize(all_match_results) - @all_match_results = all_match_results - end - - # @return [EvalResultsForSingleVariation] - attr_reader :all_match_results end class Preprocessor - def initialize(logger = nil) - @logger = logger - end - - def preprocess_item!(kind, item) - if kind.eql? FEATURES - preprocess_flag!(item) - elsif kind.eql? SEGMENTS - preprocess_segment!(item) - end - end - - def preprocess_all_items!(kind, items_map) - return items_map if !items_map - items_map.each do |key, item| - preprocess_item!(kind, item) - end - end - - def preprocess_flag!(flag) - flag[:_preprocessed] = FlagPreprocessed.new( - EvaluatorHelpers.off_result(flag), - precompute_multi_variation_results(flag, EvaluationReason::fallthrough(false), EvaluationReason::fallthrough(true)) - ) - (flag[:prerequisites] || []).each do |prereq| - preprocess_prerequisite!(prereq, flag) - end - (flag[:targets] || []).each do |target| - preprocess_target!(target, flag) - end - rules = flag[:rules] - (rules || []).each_index do |index| - preprocess_flag_rule!(rules[index], index, flag) - end - end - - def preprocess_segment!(segment) - # nothing to do for segments currently - end - - private def preprocess_prerequisite!(prereq, flag) - prereq[:_preprocessed] = PrerequisitePreprocessed.new( - EvaluatorHelpers.prerequisite_failed_result(prereq, flag, @logger) - ) - end - - private def preprocess_target!(target, flag) - target[:_preprocessed] = TargetPreprocessed.new( - EvaluatorHelpers.target_match_result(target, flag, @logger) - ) - end - - private def preprocess_flag_rule!(rule, index, flag) - match_reason = EvaluationReason::rule_match(index, rule[:id]) - match_reason_in_experiment = EvaluationReason::rule_match(index, rule[:id], true) - rule[:_preprocessed] = FlagRulePreprocessed.new( - precompute_multi_variation_results(flag, match_reason, match_reason_in_experiment) - ) - end - - private def precompute_multi_variation_results(flag, regular_reason, in_experiment_reason) + # @param flag [LaunchDarkly::Impl::Model::FeatureFlag] + # @param regular_reason [LaunchDarkly::EvaluationReason] + # @param in_experiment_reason [LaunchDarkly::EvaluationReason] + # @return [EvalResultFactoryMultiVariations] + def self.precompute_multi_variation_results(flag, regular_reason, in_experiment_reason) factories = [] vars = flag[:variations] || [] vars.each_index do |index| diff --git a/lib/ldclient-rb/impl/model/segment.rb b/lib/ldclient-rb/impl/model/segment.rb new file mode 100644 index 00000000..d78036a7 --- /dev/null +++ b/lib/ldclient-rb/impl/model/segment.rb @@ -0,0 +1,126 @@ +require 
"ldclient-rb/impl/model/clause" +require "ldclient-rb/impl/model/preprocessed_data" +require "set" + +# See serialization.rb for implementation notes on the data model classes. + +module LaunchDarkly + module Impl + module Model + class Segment + # @param data [Hash] + # @param logger [Logger|nil] + def initialize(data, logger = nil) + raise ArgumentError, "expected hash but got #{data.class}" unless data.is_a?(Hash) + @data = data + @key = data[:key] + @version = data[:version] + @deleted = !!data[:deleted] + return if @deleted + @included = data[:included] || [] + @excluded = data[:excluded] || [] + @included_contexts = (data[:includedContexts] || []).map do |target_data| + SegmentTarget.new(target_data) + end + @excluded_contexts = (data[:excludedContexts] || []).map do |target_data| + SegmentTarget.new(target_data) + end + @rules = (data[:rules] || []).map do |rule_data| + SegmentRule.new(rule_data, logger) + end + @unbounded = !!data[:unbounded] + @unbounded_context_kind = data[:unboundedContextKind] || LDContext::KIND_DEFAULT + @generation = data[:generation] + @salt = data[:salt] + end + + # @return [Hash] + attr_reader :data + # @return [String] + attr_reader :key + # @return [Integer] + attr_reader :version + # @return [Boolean] + attr_reader :deleted + # @return [Array] + attr_reader :included + # @return [Array] + attr_reader :excluded + # @return [Array] + attr_reader :included_contexts + # @return [Array] + attr_reader :excluded_contexts + # @return [Array] + attr_reader :rules + # @return [Boolean] + attr_reader :unbounded + # @return [String] + attr_reader :unbounded_context_kind + # @return [Integer|nil] + attr_reader :generation + # @return [String] + attr_reader :salt + + # This method allows us to read properties of the object as if it's just a hash. Currently this is + # necessary because some data store logic is still written to expect hashes; we can remove it once + # we migrate entirely to using attributes of the class. + def [](key) + @data[key] + end + + def ==(other) + other.is_a?(Segment) && other.data == self.data + end + + def as_json(*) # parameter is unused, but may be passed if we're using the json gem + @data + end + + # Same as as_json, but converts the JSON structure into a string. 
+ def to_json(*a) + as_json.to_json(a) + end + end + + class SegmentTarget + def initialize(data) + @data = data + @context_kind = data[:contextKind] + @values = Set.new(data[:values] || []) + end + + # @return [Hash] + attr_reader :data + # @return [String] + attr_reader :context_kind + # @return [Set] + attr_reader :values + end + + class SegmentRule + def initialize(data, logger) + @data = data + @clauses = (data[:clauses] || []).map do |clause_data| + Clause.new(clause_data, logger) + end + @weight = data[:weight] + @bucket_by = data[:bucketBy] + @rollout_context_kind = data[:rolloutContextKind] + end + + # @return [Hash] + attr_reader :data + # @return [Array] + attr_reader :clauses + # @return [Integer|nil] + attr_reader :weight + # @return [String|nil] + attr_reader :bucket_by + # @return [String|nil] + attr_reader :rollout_context_kind + end + + # Clause is defined in its own file because clauses are used by both flags and segments + end + end +end diff --git a/lib/ldclient-rb/impl/model/serialization.rb b/lib/ldclient-rb/impl/model/serialization.rb index 1d306f46..3bc3029d 100644 --- a/lib/ldclient-rb/impl/model/serialization.rb +++ b/lib/ldclient-rb/impl/model/serialization.rb @@ -1,31 +1,71 @@ +require "ldclient-rb/impl/model/feature_flag" require "ldclient-rb/impl/model/preprocessed_data" +require "ldclient-rb/impl/model/segment" + +# General implementation notes about the data model classes in LaunchDarkly::Impl::Model-- +# +# As soon as we receive flag/segment JSON data from LaunchDarkly (or, read it from a database), we +# transform it into the model classes FeatureFlag, Segment, etc. The constructor of each of these +# classes takes a hash (the parsed JSON), and transforms it into an internal representation that +# is more efficient for evaluations. +# +# Validation works as follows: +# - A property value that is of the correct type, but is invalid for other reasons (for example, +# if a flag rule refers to variation index 5, but there are only 2 variations in the flag), does +# not prevent the flag from being parsed and stored. It does cause a warning to be logged, if a +# logger was passed to the constructor. +# - If a value is completely invalid for the schema, the constructor may throw an +# exception, causing the whole data set to be rejected. This is consistent with the behavior of +# the strongly-typed SDKs. +# +# Currently, the model classes also retain the original hash of the parsed JSON. This is because +# we may need to re-serialize them to JSON, and building the JSON on the fly would be very +# inefficient, so each model class has a to_json method that just returns the same Hash. If we +# are able in the future to either use a custom streaming serializer, or pass the JSON data +# straight through from LaunchDarkly to a database instead of re-serializing, we could stop +# retaining this data. module LaunchDarkly module Impl module Model # Abstraction of deserializing a feature flag or segment that was read from a data store or # received from LaunchDarkly. - def self.deserialize(kind, json, logger = nil) - return nil if json.nil? - item = JSON.parse(json, symbolize_names: true) - DataModelPreprocessing::Preprocessor.new(logger).preprocess_item!(kind, item) - item + # + # SDK code outside of Impl::Model should use this method instead of calling the model class + # constructors directly, so as not to rely on implementation details. 
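A brief usage sketch of this method, assuming the ldclient-rb gem is loaded and using a hypothetical flag JSON string:

json = '{"key": "my-flag", "version": 1, "on": false, "variations": [true, false], "offVariation": 1, "salt": "abc"}'
flag = LaunchDarkly::Impl::Model.deserialize(LaunchDarkly::FEATURES, json)
flag.key            # => "my-flag"
flag.off_variation  # => 1
flag.as_json        # => the parsed hash, retained so re-serialization stays cheap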
+ # + # @param kind [Hash] normally either FEATURES or SEGMENTS + # @param input [object] a JSON string or a parsed hash (or a data model object, in which case + # we'll just return the original object) + # @param logger [Logger|nil] logs warnings if there are any data validation problems + # @return [Object] the flag or segment (or, for an unknown data kind, the data as a hash) + def self.deserialize(kind, input, logger = nil) + return nil if input.nil? + return input if !input.is_a?(String) && !input.is_a?(Hash) + data = input.is_a?(Hash) ? input : JSON.parse(input, symbolize_names: true) + case kind + when FEATURES + FeatureFlag.new(data, logger) + when SEGMENTS + Segment.new(data, logger) + else + data + end end # Abstraction of serializing a feature flag or segment that will be written to a data store. - # Currently we just call to_json. + # Currently we just call to_json, but SDK code outside of Impl::Model should use this method + # instead of to_json, so as not to rely on implementation details. def self.serialize(kind, item) item.to_json end # Translates a { flags: ..., segments: ... } object received from LaunchDarkly to the data store format. def self.make_all_store_data(received_data, logger = nil) - preprocessor = DataModelPreprocessing::Preprocessor.new(logger) - flags = received_data[:flags] - preprocessor.preprocess_all_items!(FEATURES, flags) - segments = received_data[:segments] - preprocessor.preprocess_all_items!(SEGMENTS, segments) - { FEATURES => flags, SEGMENTS => segments } + { + FEATURES => (received_data[:flags] || {}).transform_values { |data| FeatureFlag.new(data, logger) }, + SEGMENTS => (received_data[:segments] || {}).transform_values { |data| Segment.new(data, logger) }, + } end end end diff --git a/lib/ldclient-rb/impl/repeating_task.rb b/lib/ldclient-rb/impl/repeating_task.rb index bb0255fe..299454cc 100644 --- a/lib/ldclient-rb/impl/repeating_task.rb +++ b/lib/ldclient-rb/impl/repeating_task.rb @@ -19,7 +19,7 @@ def start if @start_delay sleep(@start_delay) end - while !@stopped.value do + until @stopped.value do started_at = Time.now begin @task.call diff --git a/lib/ldclient-rb/impl/store_data_set_sorter.rb b/lib/ldclient-rb/impl/store_data_set_sorter.rb index 4454fe75..9ad15729 100644 --- a/lib/ldclient-rb/impl/store_data_set_sorter.rb +++ b/lib/ldclient-rb/impl/store_data_set_sorter.rb @@ -33,7 +33,7 @@ def self.sort_collection(kind, input) return input if dependency_fn.nil? || input.empty? remaining_items = input.clone items_out = {} - while !remaining_items.empty? + until remaining_items.empty? # pick a random item that hasn't been updated yet key, item = remaining_items.first self.add_with_dependencies_first(item, dependency_fn, remaining_items, items_out) @@ -46,7 +46,7 @@ def self.add_with_dependencies_first(item, dependency_fn, remaining_items, items remaining_items.delete(item_key) # we won't need to visit this item again dependency_fn.call(item).each do |dep_key| dep_item = remaining_items[dep_key.to_sym] - self.add_with_dependencies_first(dep_item, dependency_fn, remaining_items, items_out) if !dep_item.nil? + self.add_with_dependencies_first(dep_item, dependency_fn, remaining_items, items_out) unless dep_item.nil? 
end items_out[item_key] = item end diff --git a/lib/ldclient-rb/impl/unbounded_pool.rb b/lib/ldclient-rb/impl/unbounded_pool.rb index 55bd515f..c8219241 100644 --- a/lib/ldclient-rb/impl/unbounded_pool.rb +++ b/lib/ldclient-rb/impl/unbounded_pool.rb @@ -25,7 +25,7 @@ def release(instance) def dispose_all @lock.synchronize { - @pool.map { |instance| @instance_destructor.call(instance) } if !@instance_destructor.nil? + @pool.map { |instance| @instance_destructor.call(instance) } unless @instance_destructor.nil? @pool.clear() } end diff --git a/lib/ldclient-rb/impl/util.rb b/lib/ldclient-rb/impl/util.rb index 165ce885..6c9801bb 100644 --- a/lib/ldclient-rb/impl/util.rb +++ b/lib/ldclient-rb/impl/util.rb @@ -1,7 +1,7 @@ module LaunchDarkly module Impl module Util - def self.is_bool(aObject) + def self.bool?(aObject) [true,false].include? aObject end @@ -56,7 +56,7 @@ def self.validate_application_value(value, name, logger) return "" end - if value.match(/[^a-zA-Z0-9._-]/) + if /[^a-zA-Z0-9._-]/.match?(value) logger.warn { "Value of application[#{name}] contained invalid characters and was discarded" } return "" end diff --git a/lib/ldclient-rb/in_memory_store.rb b/lib/ldclient-rb/in_memory_store.rb index 576d90c7..dcef4529 100644 --- a/lib/ldclient-rb/in_memory_store.rb +++ b/lib/ldclient-rb/in_memory_store.rb @@ -14,13 +14,13 @@ module LaunchDarkly FEATURES = { namespace: "features", priority: 1, # that is, features should be stored after segments - get_dependency_keys: lambda { |flag| (flag[:prerequisites] || []).map { |p| p[:key] } } + get_dependency_keys: lambda { |flag| (flag[:prerequisites] || []).map { |p| p[:key] } }, }.freeze # @private SEGMENTS = { namespace: "segments", - priority: 0 + priority: 0, }.freeze # diff --git a/lib/ldclient-rb/integrations/consul.rb b/lib/ldclient-rb/integrations/consul.rb index b3947047..1365baf9 100644 --- a/lib/ldclient-rb/integrations/consul.rb +++ b/lib/ldclient-rb/integrations/consul.rb @@ -38,7 +38,7 @@ def self.default_prefix # def self.new_feature_store(opts = {}) core = LaunchDarkly::Impl::Integrations::Consul::ConsulFeatureStoreCore.new(opts) - return LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts) + LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts) end end end diff --git a/lib/ldclient-rb/integrations/dynamodb.rb b/lib/ldclient-rb/integrations/dynamodb.rb index 29aedcdb..52e05cf3 100644 --- a/lib/ldclient-rb/integrations/dynamodb.rb +++ b/lib/ldclient-rb/integrations/dynamodb.rb @@ -54,7 +54,7 @@ def self.new_feature_store(table_name, opts = {}) # # Creates a DynamoDB-backed Big Segment store. # - # Big Segments are a specific type of user segments. For more information, read the LaunchDarkly + # Big Segments are a specific type of segments. For more information, read the LaunchDarkly # documentation: https://docs.launchdarkly.com/home/users/big-segments # # To use this method, you must first install one of the AWS SDK gems: either `aws-sdk-dynamodb`, or diff --git a/lib/ldclient-rb/integrations/file_data.rb b/lib/ldclient-rb/integrations/file_data.rb index 370d3aa6..4c356667 100644 --- a/lib/ldclient-rb/integrations/file_data.rb +++ b/lib/ldclient-rb/integrations/file_data.rb @@ -25,7 +25,7 @@ module Integrations # # - `flags`: Feature flag definitions. # - `flagValues`: Simplified feature flags that contain only a value. - # - `segments`: User segment definitions. + # - `segments`: Context segment definitions. 
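As an illustration (hypothetical flag key), a parsed data file combining these sections corresponds to a hash like the one below; only flagValues is populated here, since full flags and segments definitions follow LaunchDarkly's own format:

file_data = {
  flags: {},                                  # full feature flag definitions
  flagValues: { "my-boolean-flag" => true },  # simplified form: flag key => value
  segments: {},                               # full segment definitions
}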
# # The format of the data in `flags` and `segments` is defined by the LaunchDarkly application # and is subject to change. Rather than trying to construct these objects yourself, it is simpler @@ -78,7 +78,7 @@ module Integrations # same flag key or segment key more than once, either in a single file or across multiple files. # # If the data source encounters any error in any file-- malformed content, a missing file, or a - # duplicate key-- it will not load flags from any of the files. + # duplicate key-- it will not load flags from any of the files. # module FileData # @@ -100,7 +100,7 @@ module FileData # @return an object that can be stored in {Config#data_source} # def self.data_source(options={}) - return lambda { |sdk_key, config| + lambda { |sdk_key, config| Impl::Integrations::FileDataSourceImpl.new(config.feature_store, config.logger, options) } end end diff --git a/lib/ldclient-rb/integrations/redis.rb b/lib/ldclient-rb/integrations/redis.rb index 95147286..0e5bf68c 100644 --- a/lib/ldclient-rb/integrations/redis.rb +++ b/lib/ldclient-rb/integrations/redis.rb @@ -1,4 +1,4 @@ -require "ldclient-rb/redis_store" # eventually we will just refer to impl/integrations/redis_impl directly +require "ldclient-rb/impl/integrations/redis_impl" module LaunchDarkly module Integrations @@ -59,13 +59,13 @@ def self.default_prefix # @return [LaunchDarkly::Interfaces::FeatureStore] a feature store object # def self.new_feature_store(opts = {}) - return RedisFeatureStore.new(opts) + LaunchDarkly::Impl::Integrations::Redis::RedisFeatureStore.new(opts) end # # Creates a Redis-backed Big Segment store. # - # Big Segments are a specific type of user segments. For more information, read the LaunchDarkly + # Big Segments are a specific type of segments. For more information, read the LaunchDarkly # documentation: https://docs.launchdarkly.com/home/users/big-segments # # To use this method, you must first have the `redis` and `connection-pool` gems installed. 
Then, @@ -91,7 +91,7 @@ def self.new_feature_store(opts = {}) # @return [LaunchDarkly::Interfaces::BigSegmentStore] a Big Segment store object # def self.new_big_segment_store(opts) - return LaunchDarkly::Impl::Integrations::Redis::RedisBigSegmentStore.new(opts) + LaunchDarkly::Impl::Integrations::Redis::RedisBigSegmentStore.new(opts) end end end diff --git a/lib/ldclient-rb/integrations/test_data.rb b/lib/ldclient-rb/integrations/test_data.rb index 8cbcc980..3a810507 100644 --- a/lib/ldclient-rb/integrations/test_data.rb +++ b/lib/ldclient-rb/integrations/test_data.rb @@ -1,4 +1,6 @@ require 'ldclient-rb/impl/integrations/test_data/test_data_source' +require 'ldclient-rb/impl/model/feature_flag' +require 'ldclient-rb/impl/model/segment' require 'ldclient-rb/integrations/test_data/flag_builder' require 'concurrent/atomics' @@ -14,12 +16,12 @@ module Integrations # # @example # td = LaunchDarkly::Integrations::TestData.data_source - # td.update(td.flag("flag-key-1").variation_for_all_users(true)) + # td.update(td.flag("flag-key-1").variation_for_all(true)) # config = LaunchDarkly::Config.new(data_source: td) # client = LaunchDarkly::LDClient.new('sdkKey', config) # # flags can be updated at any time: # td.update(td.flag("flag-key-2") - # .variation_for_user("some-user-key", true) + # .variation_for_key("user", some-user-key", true) # .fallthrough_variation(false)) # # The above example uses a simple boolean flag, but more complex configurations are possible using @@ -77,7 +79,7 @@ def call(_, config) # starts with the same configuration that was last provided for this flag. # # Otherwise, it starts with a new default configuration in which the flag has `true` and - # `false` variations, is `true` for all users when targeting is turned on and + # `false` variations, is `true` for all contexts when targeting is turned on and # `false` otherwise, and currently has targeting turned on. You can change any of those # properties, and provide more complex behavior, using the {FlagBuilder} methods. # @@ -119,7 +121,7 @@ def update(flag_builder) if @current_flags[flag_key] then version = @current_flags[flag_key][:version] end - new_flag = flag_builder.build(version+1) + new_flag = Impl::Model.deserialize(FEATURES, flag_builder.build(version+1)) @current_flags[flag_key] = new_flag end update_item(FEATURES, new_flag) @@ -149,15 +151,15 @@ def use_preconfigured_flag(flag) end # - # Copies a full user segment data model object into the test data. + # Copies a full segment data model object into the test data. # # It immediately propagates the change to any `LDClient` instance(s) that you have already # configured to use this `TestData`. If no `LDClient` has been started yet, it simply adds # this segment to the test data which will be provided to any LDClient that you subsequently # configure. # - # This method is currently the only way to inject user segment data, since there is no builder - # API for segments. It is mainly intended for the SDK's own tests of user segment functionality, + # This method is currently the only way to inject segment data, since there is no builder + # API for segments. It is mainly intended for the SDK's own tests of segment functionality, # since application tests that need to produce a desired evaluation state could do so more easily # by just setting flag values. 
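The TestData workflow documented above, as a runnable sketch (the flag keys, SDK key, and user key are placeholders):

require "ldclient-rb"

td = LaunchDarkly::Integrations::TestData.data_source
td.update(td.flag("flag-key-1").variation_for_all(true))

config = LaunchDarkly::Config.new(data_source: td)
client = LaunchDarkly::LDClient.new("sdk-key", config)

# Flags can be re-targeted at any time, for example for a single user key:
td.update(td.flag("flag-key-2")
            .variation_for_key("user", "some-user-key", true)
            .fallthrough_variation(false))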
# @@ -169,12 +171,14 @@ def use_preconfigured_segment(segment) end private def use_preconfigured_item(kind, item, current) - key = item[:key].to_sym + item = Impl::Model.deserialize(kind, item) + key = item.key.to_sym @lock.with_write_lock do old_item = current[key] - if !old_item.nil? then - item = item.clone - item[:version] = old_item[:version] + 1 + unless old_item.nil? then + data = item.as_json + data[:version] = old_item.version + 1 + item = Impl::Model.deserialize(kind, data) end current[key] = item end @@ -195,7 +199,7 @@ def make_init_data @lock.with_read_lock do { FEATURES => @current_flags.clone, - SEGMENTS => @current_segments.clone + SEGMENTS => @current_segments.clone, } end end diff --git a/lib/ldclient-rb/integrations/test_data/flag_builder.rb b/lib/ldclient-rb/integrations/test_data/flag_builder.rb index 79d6247b..2b8a495d 100644 --- a/lib/ldclient-rb/integrations/test_data/flag_builder.rb +++ b/lib/ldclient-rb/integrations/test_data/flag_builder.rb @@ -45,7 +45,7 @@ def on(on) # # Specifies the fallthrough variation. The fallthrough is the value - # that is returned if targeting is on and the user was not matched by a more specific + # that is returned if targeting is on and the context was not matched by a more specific # target or rule. # # If the flag was previously configured with other variations and the variation specified is a boolean, @@ -56,7 +56,7 @@ def on(on) # @return [FlagBuilder] the builder # def fallthrough_variation(variation) - if LaunchDarkly::Impl::Util.is_bool variation then + if LaunchDarkly::Impl::Util.bool? variation boolean_flag.fallthrough_variation(variation_for_boolean(variation)) else @fallthrough_variation = variation @@ -76,7 +76,7 @@ def fallthrough_variation(variation) # @return [FlagBuilder] the builder # def off_variation(variation) - if LaunchDarkly::Impl::Util.is_bool variation then + if LaunchDarkly::Impl::Util.bool? variation boolean_flag.off_variation(variation_for_boolean(variation)) else @off_variation = variation @@ -108,7 +108,7 @@ def variations(*variations) end # - # Sets the flag to always return the specified variation for all users. + # Sets the flag to always return the specified variation for all contexts. # # The variation is specified, Targeting is switched on, and any existing targets or rules are removed. # The fallthrough variation is set to the specified value. The off variation is left unchanged. @@ -120,31 +120,41 @@ def variations(*variations) # 0 for the first, 1 for the second, etc. # @return [FlagBuilder] the builder # - def variation_for_all_users(variation) - if LaunchDarkly::Impl::Util.is_bool variation then - boolean_flag.variation_for_all_users(variation_for_boolean(variation)) + def variation_for_all(variation) + if LaunchDarkly::Impl::Util.bool? variation + boolean_flag.variation_for_all(variation_for_boolean(variation)) else - on(true).clear_rules.clear_user_targets.fallthrough_variation(variation) + on(true).clear_rules.clear_targets.fallthrough_variation(variation) end end # - # Sets the flag to always return the specified variation value for all users. + # @deprecated Backwards compatibility alias for #variation_for_all + # + alias_method :variation_for_all_users, :variation_for_all + + # + # Sets the flag to always return the specified variation value for all context. # # The value may be of any valid JSON type. This method changes the # flag to have only a single variation, which is this value, and to return the same # variation regardless of whether targeting is on or off. 
Any existing targets or rules # are removed. # - # @param value [Object] the desired value to be returned for all users + # @param value [Object] the desired value to be returned for all contexts # @return [FlagBuilder] the builder # - def value_for_all_users(value) - variations(value).variation_for_all_users(0) + def value_for_all(value) + variations(value).variation_for_all(0) end # - # Sets the flag to return the specified variation for a specific user key when targeting + # @deprecated Backwards compatibility alias for #value_for_all + # + alias_method :value_for_all_users, :value_for_all + + # + # Sets the flag to return the specified variation for a specific context key when targeting # is on. # # This has no effect when targeting is turned off for the flag. @@ -152,36 +162,87 @@ def value_for_all_users(value) # If the flag was previously configured with other variations and the variation specified is a boolean, # this also changes it to a boolean flag. # - # @param user_key [String] a user key + # @param context_kind [String] a context kind + # @param context_key [String] a context key # @param variation [Boolean, Integer] true or false or the desired variation index to return: # 0 for the first, 1 for the second, etc. # @return [FlagBuilder] the builder # - def variation_for_user(user_key, variation) - if LaunchDarkly::Impl::Util.is_bool variation then - boolean_flag.variation_for_user(user_key, variation_for_boolean(variation)) - else - if @targets.nil? then - @targets = Hash.new - end - @variations.count.times do | i | - if i == variation then - if @targets[i].nil? then - @targets[i] = [user_key] - else - @targets[i].push(user_key) - end - elsif not @targets[i].nil? then - @targets[i].delete(user_key) + def variation_for_key(context_kind, context_key, variation) + if LaunchDarkly::Impl::Util.bool? variation + return boolean_flag.variation_for_key(context_kind, context_key, variation_for_boolean(variation)) + end + + if @targets.nil? + @targets = Hash.new + end + + targets = @targets[context_kind] || [] + @variations.count.times do | i | + if i == variation + if targets[i].nil? + targets[i] = [context_key] + else + targets[i].push(context_key) end + elsif not targets[i].nil? + targets[i].delete(context_key) end - self end + + @targets[context_kind] = targets + + self + end + + # + # Sets the flag to return the specified variation for a specific user key when targeting + # is on. + # + # This is a shortcut for calling {variation_for_key} with + # `LaunchDarkly::LDContext::KIND_DEFAULT` as the context kind. + # + # This has no effect when targeting is turned off for the flag. + # + # If the flag was previously configured with other variations and the variation specified is a boolean, + # this also changes it to a boolean flag. + # + # @param user_key [String] a user key + # @param variation [Boolean, Integer] true or false or the desired variation index to return: + # 0 for the first, 1 for the second, etc. + # @return [FlagBuilder] the builder + # + def variation_for_user(user_key, variation) + variation_for_key(LaunchDarkly::LDContext::KIND_DEFAULT, user_key, variation) end # # Starts defining a flag rule, using the "is one of" operator. 
# + # @example create a rule that returns `true` if the name is "Patsy" or "Edina" and the context kind is "user" + # testData.flag("flag") + # .if_match_context("user", :name, 'Patsy', 'Edina') + # .then_return(true); + # + # @param context_kind [String] a context kind + # @param attribute [Symbol] the context attribute to match against + # @param values [Array] values to compare to + # @return [FlagRuleBuilder] a flag rule builder + # + # @see FlagRuleBuilder#then_return + # @see FlagRuleBuilder#and_match + # @see FlagRuleBuilder#and_not_match + # + def if_match_context(context_kind, attribute, *values) + FlagRuleBuilder.new(self).and_match_context(context_kind, attribute, *values) + end + + # + # Starts defining a flag rule, using the "is one of" operator. + # + # This is a shortcut for calling {if_match_context} with + # `LaunchDarkly::LDContext::KIND_DEFAULT` as the context kind. + # # @example create a rule that returns `true` if the name is "Patsy" or "Edina" # testData.flag("flag") # .if_match(:name, 'Patsy', 'Edina') @@ -196,12 +257,36 @@ def variation_for_user(user_key, variation) # @see FlagRuleBuilder#and_not_match # def if_match(attribute, *values) - FlagRuleBuilder.new(self).and_match(attribute, *values) + if_match_context(LaunchDarkly::LDContext::KIND_DEFAULT, attribute, *values) + end + + # + # Starts defining a flag rule, using the "is not one of" operator. + # + # @example create a rule that returns `true` if the name is neither "Saffron" nor "Bubble" + # testData.flag("flag") + # .if_not_match_context("user", :name, 'Saffron', 'Bubble') + # .then_return(true) + # + # @param context_kind [String] a context kind + # @param attribute [Symbol] the context attribute to match against + # @param values [Array] values to compare to + # @return [FlagRuleBuilder] a flag rule builder + # + # @see FlagRuleBuilder#then_return + # @see FlagRuleBuilder#and_match + # @see FlagRuleBuilder#and_not_match + # + def if_not_match_context(context_kind, attribute, *values) + FlagRuleBuilder.new(self).and_not_match_context(context_kind, attribute, *values) end # # Starts defining a flag rule, using the "is not one of" operator. # + # This is a shortcut for calling {if_not_match_context} with + # `LaunchDarkly::LDContext::KIND_DEFAULT` as the context kind. + # # @example create a rule that returns `true` if the name is neither "Saffron" nor "Bubble" # testData.flag("flag") # .if_not_match(:name, 'Saffron', 'Bubble') @@ -216,20 +301,25 @@ def if_match(attribute, *values) # @see FlagRuleBuilder#and_not_match # def if_not_match(attribute, *values) - FlagRuleBuilder.new(self).and_not_match(attribute, *values) + if_not_match_context(LaunchDarkly::LDContext::KIND_DEFAULT, attribute, *values) end # - # Removes any existing user targets from the flag. - # This undoes the effect of methods like {#variation_for_user} + # Removes any existing targets from the flag. + # This undoes the effect of methods like {#variation_for_key} # # @return [FlagBuilder] the same builder # - def clear_user_targets + def clear_targets @targets = nil self end + # + # @deprecated Backwards compatibility alias for #clear_targets + # + alias_method :clear_user_targets, :clear_targets + # # Removes any existing rules from the flag. # This undoes the effect of methods like {#if_match} @@ -243,7 +333,7 @@ def clear_rules # @private def add_rule(rule) - if @rules.nil? then + if @rules.nil? 
@rules = Array.new end @rules.push(rule) @@ -261,7 +351,7 @@ def add_rule(rule) # @return [FlagBuilder] the builder # def boolean_flag - if is_boolean_flag then + if boolean_flag? self else variations(true, false) @@ -278,22 +368,36 @@ def build(version) variations: @variations, } - unless @off_variation.nil? then + unless @off_variation.nil? res[:offVariation] = @off_variation end - unless @fallthrough_variation.nil? then + unless @fallthrough_variation.nil? res[:fallthrough] = { variation: @fallthrough_variation } end - unless @targets.nil? then - res[:targets] = @targets.collect do | variation, values | - { variation: variation, values: values } + unless @targets.nil? + targets = [] + context_targets = [] + + @targets.each do |kind, targets_for_kind| + targets_for_kind.each_with_index do |values, variation| + next if values.nil? + if kind == LaunchDarkly::LDContext::KIND_DEFAULT + targets << { variation: variation, values: values } + context_targets << { contextKind: LaunchDarkly::LDContext::KIND_DEFAULT, variation: variation, values: [] } + else + context_targets << { contextKind: kind, variation: variation, values: values } + end + end end + + res[:targets] = targets + res[:contextTargets] = context_targets end - unless @rules.nil? then - res[:rules] = @rules.each_with_index.collect { | rule, i | rule.build(i) } + unless @rules.nil? + res[:rules] = @rules.each_with_index.map { | rule, i | rule.build(i) } end res @@ -303,8 +407,8 @@ def build(version) # A builder for feature flag rules to be used with {FlagBuilder}. # # In the LaunchDarkly model, a flag can have any number of rules, and a rule can have any number of - # clauses. A clause is an individual test such as "name is 'X'". A rule matches a user if all of the - # rule's clauses match the user. + # clauses. A clause is an individual test such as "name is 'X'". A rule matches a context if all of the + # rule's clauses match the context. # # To start defining a rule, use one of the flag builder's matching methods such as # {FlagBuilder#if_match}. This defines the first clause for the rule. @@ -314,7 +418,7 @@ def build(version) # class FlagRuleBuilder # @private - FlagRuleClause = Struct.new(:attribute, :op, :values, :negate, keyword_init: true) + FlagRuleClause = Struct.new(:contextKind, :attribute, :op, :values, :negate, keyword_init: true) # @private def initialize(flag_builder) @@ -331,6 +435,34 @@ def intialize_copy(other) # # Adds another clause, using the "is one of" operator. # + # @example create a rule that returns `true` if the name is "Patsy", the country is "gb", and the context kind is "user" + # testData.flag("flag") + # .if_match_context("user", :name, 'Patsy') + # .and_match_context("user", :country, 'gb') + # .then_return(true) + # + # @param context_kind [String] a context kind + # @param attribute [Symbol] the context attribute to match against + # @param values [Array] values to compare to + # @return [FlagRuleBuilder] the rule builder + # + def and_match_context(context_kind, attribute, *values) + @clauses.push(FlagRuleClause.new( + contextKind: context_kind, + attribute: attribute, + op: 'in', + values: values, + negate: false + )) + self + end + + # + # Adds another clause, using the "is one of" operator. + # + # This is a shortcut for calling {and_match_context} with + # `LaunchDarkly::LDContext::KIND_DEFAULT` as the context kind. 
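Putting the rule-builder methods above together, a hedged sketch of defining a rule with both a positive and a negative clause (the flag key, context kind, and attribute values are placeholders):

td = LaunchDarkly::Integrations::TestData.data_source

# Returns true when the "user" context's name is "Patsy" or "Edina"
# and its country is not "gb"; otherwise the flag falls through.
td.update(
  td.flag("rule-flag")
    .if_match_context("user", :name, "Patsy", "Edina")
    .and_not_match_context("user", :country, "gb")
    .then_return(true)
)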
+ # # @example create a rule that returns `true` if the name is "Patsy" and the country is "gb" # testData.flag("flag") # .if_match(:name, 'Patsy') @@ -342,11 +474,30 @@ def intialize_copy(other) # @return [FlagRuleBuilder] the rule builder # def and_match(attribute, *values) + and_match_context(LaunchDarkly::LDContext::KIND_DEFAULT, attribute, *values) + end + + # + # Adds another clause, using the "is not one of" operator. + # + # @example create a rule that returns `true` if the name is "Patsy" and the country is not "gb" + # testData.flag("flag") + # .if_match_context("user", :name, 'Patsy') + # .and_not_match_context("user", :country, 'gb') + # .then_return(true) + # + # @param context_kind [String] a context kind + # @param attribute [Symbol] the context attribute to match against + # @param values [Array] values to compare to + # @return [FlagRuleBuilder] the rule builder + # + def and_not_match_context(context_kind, attribute, *values) @clauses.push(FlagRuleClause.new( + contextKind: context_kind, attribute: attribute, op: 'in', values: values, - negate: false + negate: true )) self end @@ -354,6 +505,9 @@ def and_match(attribute, *values) # # Adds another clause, using the "is not one of" operator. # + # This is a shortcut for calling {and_not_match} with + # `LaunchDarkly::LDContext::KIND_DEFAULT` as the context kind. + # # @example create a rule that returns `true` if the name is "Patsy" and the country is not "gb" # testData.flag("flag") # .if_match(:name, 'Patsy') @@ -365,13 +519,7 @@ def and_match(attribute, *values) # @return [FlagRuleBuilder] the rule builder # def and_not_match(attribute, *values) - @clauses.push(FlagRuleClause.new( - attribute: attribute, - op: 'in', - values: values, - negate: true - )) - self + and_not_match_context(LaunchDarkly::LDContext::KIND_DEFAULT, attribute, *values) end # @@ -386,7 +534,7 @@ def and_not_match(attribute, *values) # @return [FlagBuilder] the flag builder with this rule added # def then_return(variation) - if LaunchDarkly::Impl::Util.is_bool variation then + if LaunchDarkly::Impl::Util.bool? variation @variation = @flag_builder.variation_for_boolean(variation) @flag_builder.boolean_flag.add_rule(self) else @@ -400,7 +548,7 @@ def build(ri) { id: 'rule' + ri.to_s, variation: @variation, - clauses: @clauses.collect(&:to_h) + clauses: @clauses.map(&:to_h), } end end @@ -415,15 +563,23 @@ def variation_for_boolean(variation) TRUE_VARIATION_INDEX = 0 FALSE_VARIATION_INDEX = 1 - def is_boolean_flag + def boolean_flag? @variations.size == 2 && - @variations[TRUE_VARIATION_INDEX] == true && - @variations[FALSE_VARIATION_INDEX] == false + @variations[TRUE_VARIATION_INDEX] == true && + @variations[FALSE_VARIATION_INDEX] == false end def deep_copy_hash(from) to = Hash.new - from.each { |k, v| to[k] = v.clone } + from.each do |k, v| + if v.is_a?(Hash) + to[k] = deep_copy_hash(v) + elsif v.is_a?(Array) + to[k] = deep_copy_array(v) + else + to[k] = v.clone + end + end to end diff --git a/lib/ldclient-rb/integrations/util/store_wrapper.rb b/lib/ldclient-rb/integrations/util/store_wrapper.rb index c94ace94..bb129c9c 100644 --- a/lib/ldclient-rb/integrations/util/store_wrapper.rb +++ b/lib/ldclient-rb/integrations/util/store_wrapper.rb @@ -22,7 +22,7 @@ module Util # class CachingStoreWrapper include LaunchDarkly::Interfaces::FeatureStore - + # # Creates a new store wrapper instance. # @@ -49,7 +49,7 @@ def init(all_data) @core.init_internal(all_data) @inited.make_true - if !@cache.nil? + unless @cache.nil? 
@cache.clear all_data.each do |kind, items| @cache[kind] = items_if_not_deleted(items) @@ -61,15 +61,15 @@ def init(all_data) end def get(kind, key) - if !@cache.nil? + unless @cache.nil? cache_key = item_cache_key(kind, key) cached = @cache[cache_key] # note, item entries in the cache are wrapped in an array so we can cache nil values - return item_if_not_deleted(cached[0]) if !cached.nil? + return item_if_not_deleted(cached[0]) unless cached.nil? end item = @core.get_internal(kind, key) - if !@cache.nil? + unless @cache.nil? @cache[cache_key] = [item] end @@ -77,20 +77,20 @@ def get(kind, key) end def all(kind) - if !@cache.nil? + unless @cache.nil? items = @cache[all_cache_key(kind)] - return items if !items.nil? + return items unless items.nil? end items = items_if_not_deleted(@core.get_all_internal(kind)) - @cache[all_cache_key(kind)] = items if !@cache.nil? + @cache[all_cache_key(kind)] = items unless @cache.nil? items end def upsert(kind, item) new_state = @core.upsert_internal(kind, item) - if !@cache.nil? + unless @cache.nil? @cache[item_cache_key(kind, item[:key])] = [new_state] @cache.delete(all_cache_key(kind)) end diff --git a/lib/ldclient-rb/interfaces.rb b/lib/ldclient-rb/interfaces.rb index b62a90fb..64120dd5 100644 --- a/lib/ldclient-rb/interfaces.rb +++ b/lib/ldclient-rb/interfaces.rb @@ -163,30 +163,30 @@ def get_metadata end # - # Queries the store for a snapshot of the current segment state for a specific user. + # Queries the store for a snapshot of the current segment state for a specific context. # - # The user_hash is a base64-encoded string produced by hashing the user key as defined by + # The context_hash is a base64-encoded string produced by hashing the context key as defined by # the Big Segments specification; the store implementation does not need to know the details # of how this is done, because it deals only with already-hashed keys, but the string can be # assumed to only contain characters that are valid in base64. # - # The return value should be either a Hash, or nil if the user is not referenced in any big + # The return value should be either a Hash, or nil if the context is not referenced in any big # segments. Each key in the Hash is a "segment reference", which is how segments are # identified in Big Segment data. This string is not identical to the segment key-- the SDK # will add other information. The store implementation should not be concerned with the - # format of the string. Each value in the Hash is true if the user is explicitly included in - # the segment, false if the user is explicitly excluded from the segment-- and is not also + # format of the string. Each value in the Hash is true if the context is explicitly included in + # the segment, false if the context is explicitly excluded from the segment-- and is not also # explicitly included (that is, if both an include and an exclude existed in the data, the - # include would take precedence). If the user's status in a particular segment is undefined, + # include would take precedence). If the context's status in a particular segment is undefined, # there should be no key or value for that segment. # # This Hash may be cached by the SDK, so it should not be modified after it is created. It # is a snapshot of the segment membership state at one point in time. 
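To illustrate the get_membership contract described above, here is a hypothetical store backend; the segment references are invented and the actual lookup against a database is omitted:

class SketchBigSegmentStore
  # context_hash is the base64-encoded hash of the context key.
  def get_membership(context_hash)
    # Returning nil would mean the context is not referenced in any Big Segment.
    {
      "segment-ref-1" => true,   # explicitly included
      "segment-ref-2" => false,  # explicitly excluded, and not also included
    }
  end
end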
# - # @param user_hash [String] - # @return [Hash] true/false values for Big Segments that reference this user + # @param context_hash [String] + # @return [Hash] true/false values for Big Segments that reference this context # - def get_membership(user_hash) + def get_membership(context_hash) end # @@ -216,7 +216,7 @@ def initialize(last_up_to_date) # # Information about the status of a Big Segment store, provided by {BigSegmentStoreStatusProvider}. # - # Big Segments are a specific type of user segments. For more information, read the LaunchDarkly + # Big Segments are a specific type of segments. For more information, read the LaunchDarkly # documentation: https://docs.launchdarkly.com/home/users/big-segments # class BigSegmentStoreStatus @@ -226,11 +226,11 @@ def initialize(available, stale) end # True if the Big Segment store is able to respond to queries, so that the SDK can evaluate - # whether a user is in a segment or not. + # whether a context is in a segment or not. # # If this property is false, the store is not able to make queries (for instance, it may not have # a valid database connection). In this case, the SDK will treat any reference to a Big Segment - # as if no users are included in that segment. Also, the {EvaluationReason} associated with + # as if no contexts are included in that segment. Also, the {EvaluationReason} associated with # with any flag evaluation that references a Big Segment when the store is not available will # have a `big_segments_status` of `STORE_ERROR`. # @@ -259,14 +259,14 @@ def ==(other) # # The Big Segment store is the component that receives information about Big Segments, normally # from a database populated by the LaunchDarkly Relay Proxy. Big Segments are a specific type - # of user segments. For more information, read the LaunchDarkly documentation: + # of segments. For more information, read the LaunchDarkly documentation: # https://docs.launchdarkly.com/home/users/big-segments # # An implementation of this interface is returned by {LDClient#big_segment_store_status_provider}. # Application code never needs to implement this interface. # # There are two ways to interact with the status. One is to simply get the current status; if its - # `available` property is true, then the SDK is able to evaluate user membership in Big Segments, + # `available` property is true, then the SDK is able to evaluate context membership in Big Segments, # and the `stale`` property indicates whether the data might be out of date. # # The other way is to subscribe to status change notifications. Applications may wish to know if diff --git a/lib/ldclient-rb/ldclient.rb b/lib/ldclient-rb/ldclient.rb index 70dc6210..79a4f49c 100644 --- a/lib/ldclient-rb/ldclient.rb +++ b/lib/ldclient-rb/ldclient.rb @@ -59,7 +59,7 @@ def initialize(sdk_key, config = Config.default, wait_for_sec = 5) get_flag = lambda { |key| @store.get(FEATURES, key) } get_segment = lambda { |key| @store.get(SEGMENTS, key) } - get_big_segments_membership = lambda { |key| @big_segment_store_manager.get_user_membership(key) } + get_big_segments_membership = lambda { |key| @big_segment_store_manager.get_context_membership(key) } @evaluator = LaunchDarkly::Impl::Evaluator.new(get_flag, get_segment, get_big_segments_membership, @config.logger) if !@config.offline? && @config.send_events && !@config.diagnostic_opt_out? 
@@ -120,26 +120,20 @@ def flush end # - # @param key [String] the feature flag key - # @param user [Hash] the user properties - # @param default [Boolean] (false) the value to use if the flag cannot be evaluated - # @return [Boolean] the flag value - # @deprecated Use {#variation} instead. - # - def toggle?(key, user, default = false) - @config.logger.warn { "[LDClient] toggle? is deprecated. Use variation instead" } - variation(key, user, default) - end - - # - # Creates a hash string that can be used by the JavaScript SDK to identify a user. + # Creates a hash string that can be used by the JavaScript SDK to identify a context. # For more information, see [Secure mode](https://docs.launchdarkly.com/sdk/features/secure-mode#ruby). # - # @param user [Hash] the user properties - # @return [String] a hash string + # @param context [Hash, LDContext] + # @return [String, nil] a hash string or nil if the provided context was invalid # - def secure_mode_hash(user) - OpenSSL::HMAC.hexdigest("sha256", @sdk_key, user[:key].to_s) + def secure_mode_hash(context) + context = Impl::Context::make_context(context) + unless context.valid? + @config.logger.warn("secure_mode_hash called with invalid context: #{context.error}") + return nil + end + + OpenSSL::HMAC.hexdigest("sha256", @sdk_key, context.fully_qualified_key) end # @@ -165,44 +159,22 @@ def initialized? end # - # Determines the variation of a feature flag to present to a user. - # - # At a minimum, the user hash should contain a `:key`, which should be the unique - # identifier for your user (or, for an anonymous user, a session identifier or - # cookie). - # - # Other supported user attributes include IP address, country code, and an arbitrary hash of - # custom attributes. For more about the supported user properties and how they work in - # LaunchDarkly, see [Targeting users](https://docs.launchdarkly.com/home/flags/targeting-users). - # - # The optional `:privateAttributeNames` user property allows you to specify a list of - # attribute names that should not be sent back to LaunchDarkly. - # [Private attributes](https://docs.launchdarkly.com/home/users/attributes#creating-private-user-attributes) - # can also be configured globally in {Config}. - # - # @example Basic user hash - # {key: "my-user-id"} - # - # @example More complete user hash - # {key: "my-user-id", ip: "127.0.0.1", country: "US", custom: {customer_rank: 1000}} - # - # @example User with a private attribute - # {key: "my-user-id", email: "email@example.com", privateAttributeNames: ["email"]} + # Determines the variation of a feature flag to present for a context. 
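A brief usage sketch of the context-based signatures above (the SDK key, flag key, and context values are placeholders; assumes a normally configured client):

require "ldclient-rb"

client = LaunchDarkly::LDClient.new("sdk-key")
context = { key: "user-key-123", kind: "user", name: "Sandy" }

# Secure mode hash for the JavaScript SDK; returns nil if the context is invalid.
hash = client.secure_mode_hash(context)

# Evaluate a flag for the same context, falling back to false on any error.
show_feature = client.variation("my-flag-key", context, false)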
# # @param key [String] the unique feature key for the feature flag, as shown # on the LaunchDarkly dashboard - # @param user [Hash] a hash containing parameters for the end user requesting the flag + # @param context [Hash, LDContext] a hash or LDContext instance describing the context requesting the flag # @param default the default value of the flag; this is used if there is an error # condition making it impossible to find or evaluate the flag # - # @return the variation to show the user, or the default value if there's an an error + # @return the variation for the provided context, or the default value if there's an an error # - def variation(key, user, default) - evaluate_internal(key, user, default, false).value + def variation(key, context, default) + evaluate_internal(key, context, default, false).value end # - # Determines the variation of a feature flag for a user, like {#variation}, but also + # Determines the variation of a feature flag for a context, like {#variation}, but also # provides additional information about how this value was calculated. # # The return value of `variation_detail` is an {EvaluationDetail} object, which has @@ -218,43 +190,48 @@ def variation(key, user, default) # # @param key [String] the unique feature key for the feature flag, as shown # on the LaunchDarkly dashboard - # @param user [Hash] a hash containing parameters for the end user requesting the flag + # @param context [Hash, LDContext] a hash or object describing the context requesting the flag, # @param default the default value of the flag; this is used if there is an error # condition making it impossible to find or evaluate the flag # # @return [EvaluationDetail] an object describing the result # - def variation_detail(key, user, default) - evaluate_internal(key, user, default, true) + def variation_detail(key, context, default) + evaluate_internal(key, context, default, true) end # - # Registers the user. This method simply creates an analytics event containing the user - # properties, so that LaunchDarkly will know about that user if it does not already. + # Registers the context. This method simply creates an analytics event containing the context + # properties, so that LaunchDarkly will know about that context if it does not already. # - # Calling {#variation} or {#variation_detail} also sends the user information to + # Calling {#variation} or {#variation_detail} also sends the context information to # LaunchDarkly (if events are enabled), so you only need to use {#identify} if you - # want to identify the user without evaluating a flag. + # want to identify the context without evaluating a flag. # # Note that event delivery is asynchronous, so the event may not actually be sent # until later; see {#flush}. # - # @param user [Hash] The user to register; this can have all the same user properties - # described in {#variation} + # @param context [Hash, LDContext] a hash or object describing the context to register # @return [void] # - def identify(user) - if !user || user[:key].nil? || user[:key].empty? - @config.logger.warn("Identify called with nil user or empty user key!") + def identify(context) + context = LaunchDarkly::Impl::Context.make_context(context) + unless context.valid? 
+ @config.logger.warn("Identify called with invalid context: #{context.error}") + return + end + + if context.key == "" + @config.logger.warn("Identify called with empty key") return end - sanitize_user(user) - @event_processor.record_identify_event(user) + + @event_processor.record_identify_event(context) end # - # Tracks that a user performed an event. This method creates a "custom" analytics event - # containing the specified event name (key), user properties, and optional data. + # Tracks that a context performed an event. This method creates a "custom" analytics event + # containing the specified event name (key), context properties, and optional data. # # Note that event delivery is asynchronous, so the event may not actually be sent # until later; see {#flush}. @@ -265,8 +242,7 @@ def identify(user) # for the latest status. # # @param event_name [String] The name of the event - # @param user [Hash] The user to register; this can have all the same user properties - # described in {#variation} + # @param context [Hash, LDContext] a hash or object describing the context to track # @param data [Hash] An optional hash containing any additional data associated with the event # @param metric_value [Number] A numeric value used by the LaunchDarkly experimentation # feature in numeric custom metrics. Can be omitted if this event is used by only @@ -274,52 +250,22 @@ def identify(user) # for Data Export. # @return [void] # - def track(event_name, user, data = nil, metric_value = nil) - if !user || user[:key].nil? - @config.logger.warn("Track called with nil user or nil user key!") - return - end - sanitize_user(user) - @event_processor.record_custom_event(user, event_name, data, metric_value) - end - - # - # Associates a new and old user object for analytics purposes via an alias event. - # - # @param current_context [Hash] The current version of a user. - # @param previous_context [Hash] The previous version of a user. - # @return [void] - # - def alias(current_context, previous_context) - if !current_context || current_context[:key].nil? || !previous_context || previous_context[:key].nil? - @config.logger.warn("Alias called with nil user or nil user key!") + def track(event_name, context, data = nil, metric_value = nil) + context = LaunchDarkly::Impl::Context.make_context(context) + unless context.valid? + @config.logger.warn("Track called with invalid context: #{context.error}") return end - sanitize_user(current_context) - sanitize_user(previous_context) - @event_processor.record_alias_event(current_context, previous_context) - end - # - # Returns all feature flag values for the given user. - # - # @deprecated Please use {#all_flags_state} instead. Current versions of the - # client-side SDK will not generate analytics events correctly if you pass the - # result of `all_flags`. - # - # @param user [Hash] The end user requesting the feature flags - # @return [Hash] a hash of feature flag keys to values - # - def all_flags(user) - all_flags_state(user).values_map + @event_processor.record_custom_event(context, event_name, data, metric_value) end # - # Returns a {FeatureFlagsState} object that encapsulates the state of all feature flags for a given user, + # Returns a {FeatureFlagsState} object that encapsulates the state of all feature flags for a given context, # including the flag values and also metadata that can be used on the front end. This method does not # send analytics events back to LaunchDarkly. 
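In the same vein, a hedged sketch of the context-based identify, track, and all_flags_state calls (the event name, data, and options are placeholders; `client` is an already constructed LDClient):

context = { key: "user-key-123", kind: "user" }

# Register the context without evaluating any flag.
client.identify(context)

# Record a custom event with optional data and a numeric metric value.
client.track("purchase-completed", context, { items: 3 }, 99.95)

# Capture the full flag state, e.g. for bootstrapping a front end.
state = client.all_flags_state(context, { client_side_only: true })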
# - # @param user [Hash] The end user requesting the feature flags + # @param context [Hash, LDContext] a hash or object describing the context requesting the flags, # @param options [Hash] Optional parameters to control how the state is generated # @option options [Boolean] :client_side_only (false) True if only flags marked for use with the # client-side SDK should be included in the state. By default, all flags are included. @@ -331,10 +277,10 @@ def all_flags(user) # of the JSON data if you are passing the flag state to the front end. # @return [FeatureFlagsState] a {FeatureFlagsState} object which can be serialized to JSON # - def all_flags_state(user, options={}) + def all_flags_state(context, options={}) return FeatureFlagsState.new(false) if @config.offline? - if !initialized? + unless initialized? if @store.initialized? @config.logger.warn { "Called all_flags_state before client initialization; using last known values from data store" } else @@ -343,8 +289,9 @@ def all_flags_state(user, options={}) end end - unless user && !user[:key].nil? - @config.logger.error { "[LDClient] User and user key must be specified in all_flags_state" } + context = Impl::Context::make_context(context) + unless context.valid? + @config.logger.error { "[LDClient] Context was invalid for all_flags_state (#{context.error})" } return FeatureFlagsState.new(false) end @@ -364,13 +311,13 @@ def all_flags_state(user, options={}) next end begin - detail = @evaluator.evaluate(f, user).detail + detail = @evaluator.evaluate(f, context).detail rescue => exn detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_EXCEPTION)) Util.log_exception(@config.logger, "Error evaluating flag \"#{k}\" in all_flags_state", exn) end - requires_experiment_data = is_experiment(f, detail.reason) + requires_experiment_data = experiment?(f, detail.reason) flag_state = { key: f[:key], value: detail.value, @@ -425,32 +372,34 @@ def create_default_data_source(sdk_key, config, diagnostic_accumulator) end end + # @param context [Hash, LDContext] # @return [EvaluationDetail] - def evaluate_internal(key, user, default, with_reasons) + def evaluate_internal(key, context, default, with_reasons) if @config.offline? return Evaluator.error_result(EvaluationReason::ERROR_CLIENT_NOT_READY, default) end - unless user - @config.logger.error { "[LDClient] Must specify user" } + if context.nil? + @config.logger.error { "[LDClient] Must specify context" } detail = Evaluator.error_result(EvaluationReason::ERROR_USER_NOT_SPECIFIED, default) return detail end - if user[:key].nil? - @config.logger.warn { "[LDClient] Variation called with nil user key; returning default value" } + context = Impl::Context::make_context(context) + unless context.valid? + @config.logger.error { "[LDClient] Context was invalid for flag evaluation (#{context.error}); returning default value" } detail = Evaluator.error_result(EvaluationReason::ERROR_USER_NOT_SPECIFIED, default) return detail end - if !initialized? + unless initialized? if @store.initialized? 
@config.logger.warn { "[LDClient] Client has not finished initializing; using last known values from feature store" } else @config.logger.error { "[LDClient] Client has not finished initializing; feature store unavailable, returning default value" } detail = Evaluator.error_result(EvaluationReason::ERROR_CLIENT_NOT_READY, default) - record_unknown_flag_eval(key, user, default, detail.reason, with_reasons) - return detail + record_unknown_flag_eval(key, context, default, detail.reason, with_reasons) + return detail end end @@ -459,35 +408,35 @@ def evaluate_internal(key, user, default, with_reasons) if feature.nil? @config.logger.info { "[LDClient] Unknown feature flag \"#{key}\". Returning default value" } detail = Evaluator.error_result(EvaluationReason::ERROR_FLAG_NOT_FOUND, default) - record_unknown_flag_eval(key, user, default, detail.reason, with_reasons) + record_unknown_flag_eval(key, context, default, detail.reason, with_reasons) return detail end begin - res = @evaluator.evaluate(feature, user) - if !res.prereq_evals.nil? + res = @evaluator.evaluate(feature, context) + unless res.prereq_evals.nil? res.prereq_evals.each do |prereq_eval| - record_prereq_flag_eval(prereq_eval.prereq_flag, prereq_eval.prereq_of_flag, user, prereq_eval.detail, with_reasons) + record_prereq_flag_eval(prereq_eval.prereq_flag, prereq_eval.prereq_of_flag, context, prereq_eval.detail, with_reasons) end end detail = res.detail if detail.default_value? detail = EvaluationDetail.new(default, nil, detail.reason) end - record_flag_eval(feature, user, detail, default, with_reasons) - return detail + record_flag_eval(feature, context, detail, default, with_reasons) + detail rescue => exn Util.log_exception(@config.logger, "Error evaluating feature flag \"#{key}\"", exn) detail = Evaluator.error_result(EvaluationReason::ERROR_EXCEPTION, default) - record_flag_eval_error(feature, user, default, detail.reason, with_reasons) - return detail + record_flag_eval_error(feature, context, default, detail.reason, with_reasons) + detail end end - private def record_flag_eval(flag, user, detail, default, with_reasons) - add_experiment_data = is_experiment(flag, detail.reason) + private def record_flag_eval(flag, context, detail, default, with_reasons) + add_experiment_data = experiment?(flag, detail.reason) @event_processor.record_eval_event( - user, + context, flag[:key], flag[:version], detail.variation_index, @@ -499,11 +448,11 @@ def evaluate_internal(key, user, default, with_reasons) nil ) end - - private def record_prereq_flag_eval(prereq_flag, prereq_of_flag, user, detail, with_reasons) - add_experiment_data = is_experiment(prereq_flag, detail.reason) + + private def record_prereq_flag_eval(prereq_flag, prereq_of_flag, context, detail, with_reasons) + add_experiment_data = experiment?(prereq_flag, detail.reason) @event_processor.record_eval_event( - user, + context, prereq_flag[:key], prereq_flag[:version], detail.variation_index, @@ -515,19 +464,26 @@ def evaluate_internal(key, user, default, with_reasons) prereq_of_flag[:key] ) end - - private def record_flag_eval_error(flag, user, default, reason, with_reasons) - @event_processor.record_eval_event(user, flag[:key], flag[:version], nil, default, with_reasons ? reason : nil, default, + + private def record_flag_eval_error(flag, context, default, reason, with_reasons) + @event_processor.record_eval_event(context, flag[:key], flag[:version], nil, default, with_reasons ? 
reason : nil, default, flag[:trackEvents], flag[:debugEventsUntilDate], nil) end - private def record_unknown_flag_eval(flag_key, user, default, reason, with_reasons) - @event_processor.record_eval_event(user, flag_key, nil, nil, default, with_reasons ? reason : nil, default, + # + # @param flag_key [String] + # @param context [LaunchDarkly::LDContext] + # @param default [any] + # @param reason [LaunchDarkly::EvaluationReason] + # @param with_reasons [Boolean] + # + private def record_unknown_flag_eval(flag_key, context, default, reason, with_reasons) + @event_processor.record_eval_event(context, flag_key, nil, nil, default, with_reasons ? reason : nil, default, false, nil, nil) end - private def is_experiment(flag, reason) - return false if !reason + private def experiment?(flag, reason) + return false unless reason if reason.in_experiment return true @@ -536,7 +492,7 @@ def evaluate_internal(key, user, default, with_reasons) case reason[:kind] when 'RULE_MATCH' index = reason[:ruleIndex] - if !index.nil? + unless index.nil? rules = flag[:rules] || [] return index >= 0 && index < rules.length && rules[index][:trackEvents] end @@ -545,12 +501,6 @@ def evaluate_internal(key, user, default, with_reasons) end false end - - private def sanitize_user(user) - if user[:key] - user[:key] = user[:key].to_s - end - end end # diff --git a/lib/ldclient-rb/memoized_value.rb b/lib/ldclient-rb/memoized_value.rb index ddddb7e0..7a829f29 100644 --- a/lib/ldclient-rb/memoized_value.rb +++ b/lib/ldclient-rb/memoized_value.rb @@ -14,7 +14,7 @@ def initialize(&generator) def get @mutex.synchronize do - if !@inited + unless @inited @value = @generator.call @inited = true end diff --git a/lib/ldclient-rb/newrelic.rb b/lib/ldclient-rb/newrelic.rb deleted file mode 100644 index 5c9b7d48..00000000 --- a/lib/ldclient-rb/newrelic.rb +++ /dev/null @@ -1,17 +0,0 @@ -module LaunchDarkly - # @private - class LDNewRelic - begin - require "newrelic_rpm" - NR_ENABLED = defined?(::NewRelic::Agent.add_custom_parameters) - rescue ScriptError, StandardError - NR_ENABLED = false - end - - def self.annotate_transaction(key, value) - if NR_ENABLED - ::NewRelic::Agent.add_custom_parameters(key.to_s => value.to_s) - end - end - end -end diff --git a/lib/ldclient-rb/non_blocking_thread_pool.rb b/lib/ldclient-rb/non_blocking_thread_pool.rb index 28ec42a9..06d644ec 100644 --- a/lib/ldclient-rb/non_blocking_thread_pool.rb +++ b/lib/ldclient-rb/non_blocking_thread_pool.rb @@ -17,7 +17,7 @@ def initialize(capacity) # Attempts to submit a job, but only if a worker is available. Unlike the regular post method, # this returns a value: true if the job was submitted, false if all workers are busy. 
def post - if !@semaphore.try_acquire(1) + unless @semaphore.try_acquire(1) return end @pool.post do diff --git a/lib/ldclient-rb/polling.rb b/lib/ldclient-rb/polling.rb index d571f837..89d9f6c9 100644 --- a/lib/ldclient-rb/polling.rb +++ b/lib/ldclient-rb/polling.rb @@ -43,8 +43,8 @@ def poll end rescue UnexpectedResponseError => e message = Util.http_error_message(e.status, "polling request", "will retry") - @config.logger.error { "[LDClient] #{message}" }; - if !Util.http_error_recoverable?(e.status) + @config.logger.error { "[LDClient] #{message}" } + unless Util.http_error_recoverable?(e.status) @ready.set # if client was waiting on us, make it stop waiting - has no effect if already set stop end diff --git a/lib/ldclient-rb/redis_store.rb b/lib/ldclient-rb/redis_store.rb deleted file mode 100644 index b94e61f2..00000000 --- a/lib/ldclient-rb/redis_store.rb +++ /dev/null @@ -1,88 +0,0 @@ -require "ldclient-rb/interfaces" -require "ldclient-rb/impl/integrations/redis_impl" - -module LaunchDarkly - # - # An implementation of the LaunchDarkly client's feature store that uses a Redis - # instance. This object holds feature flags and related data received from the - # streaming API. Feature data can also be further cached in memory to reduce overhead - # of calls to Redis. - # - # To use this class, you must first have the `redis` and `connection-pool` gems - # installed. Then, create an instance and store it in the `feature_store` property - # of your client configuration. - # - # @deprecated Use the factory method in {LaunchDarkly::Integrations::Redis} instead. This specific - # implementation class may be changed or removed in the future. - # - class RedisFeatureStore - include LaunchDarkly::Interfaces::FeatureStore - - # Note that this class is now just a facade around CachingStoreWrapper, which is in turn delegating - # to RedisFeatureStoreCore where the actual database logic is. This class was retained for historical - # reasons, so that existing code can still call RedisFeatureStore.new. In the future, we will migrate - # away from exposing these concrete classes and use factory methods instead. - - # - # Constructor for a RedisFeatureStore instance. - # - # @param opts [Hash] the configuration options - # @option opts [String] :redis_url URL of the Redis instance (shortcut for omitting redis_opts) - # @option opts [Hash] :redis_opts options to pass to the Redis constructor (if you want to specify more than just redis_url) - # @option opts [String] :prefix namespace prefix to add to all hash keys used by LaunchDarkly - # @option opts [Logger] :logger a `Logger` instance; defaults to `Config.default_logger` - # @option opts [Integer] :max_connections size of the Redis connection pool - # @option opts [Integer] :expiration expiration time for the in-memory cache, in seconds; 0 for no local caching - # @option opts [Integer] :capacity maximum number of feature flags (or related objects) to cache locally - # @option opts [Object] :pool custom connection pool, if desired - # @option opts [Boolean] :pool_shutdown_on_close whether calling `close` should shutdown the custom connection pool. - # - def initialize(opts = {}) - core = LaunchDarkly::Impl::Integrations::Redis::RedisFeatureStoreCore.new(opts) - @wrapper = LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts) - end - - # - # Default value for the `redis_url` constructor parameter; points to an instance of Redis - # running at `localhost` with its default port. 
- # - def self.default_redis_url - LaunchDarkly::Integrations::Redis::default_redis_url - end - - # - # Default value for the `prefix` constructor parameter. - # - def self.default_prefix - LaunchDarkly::Integrations::Redis::default_prefix - end - - def get(kind, key) - @wrapper.get(kind, key) - end - - def all(kind) - @wrapper.all(kind) - end - - def delete(kind, key, version) - @wrapper.delete(kind, key, version) - end - - def init(all_data) - @wrapper.init(all_data) - end - - def upsert(kind, item) - @wrapper.upsert(kind, item) - end - - def initialized? - @wrapper.initialized? - end - - def stop - @wrapper.stop - end - end -end diff --git a/lib/ldclient-rb/reference.rb b/lib/ldclient-rb/reference.rb new file mode 100644 index 00000000..26595c74 --- /dev/null +++ b/lib/ldclient-rb/reference.rb @@ -0,0 +1,274 @@ +module LaunchDarkly + # + # Reference is an attribute name or path expression identifying a value + # within a Context. + # + # This type is mainly intended to be used internally by LaunchDarkly SDK and + # service code, where efficiency is a major concern so it's desirable to do + # any parsing or preprocessing just once. Applications are unlikely to need + # to use the Reference type directly. + # + # It can be used to retrieve a value with LDContext.get_value_for_reference() + # or to identify an attribute or nested value that should be considered + # private. + # + # Parsing and validation are done at the time that the Reference is + # constructed. If a Reference instance was created from an invalid string, it + # is considered invalid and its {Reference#error} attribute will return a + # non-nil error. + # + # ## Syntax + # + # The string representation of an attribute reference in LaunchDarkly JSON + # data uses the following syntax: + # + # If the first character is not a slash, the string is interpreted literally + # as an attribute name. An attribute name can contain any characters, but + # must not be empty. + # + # If the first character is a slash, the string is interpreted as a + # slash-delimited path where the first path component is an attribute name, + # and each subsequent path component is the name of a property in a JSON + # object. Any instances of the characters "/" or "~" in a path component are + # escaped as "~1" or "~0" respectively. This syntax deliberately resembles + # JSON Pointer, but no JSON Pointer behaviors other than those mentioned here + # are supported. + # + # ## Examples + # + # Suppose there is a context whose JSON implementation looks like this: + # + # { + # "kind": "user", + # "key": "value1", + # "address": { + # "street": { + # "line1": "value2", + # "line2": "value3" + # }, + # "city": "value4" + # }, + # "good/bad": "value5" + # } + # + # The attribute references "key" and "/key" would both point to "value1". + # + # The attribute reference "/address/street/line1" would point to "value2". + # + # The attribute references "good/bad" and "/good~1bad" would both point to + # "value5". + # + class Reference + ERR_EMPTY = 'empty reference' + private_constant :ERR_EMPTY + + ERR_INVALID_ESCAPE_SEQUENCE = 'invalid escape sequence' + private_constant :ERR_INVALID_ESCAPE_SEQUENCE + + ERR_DOUBLE_TRAILING_SLASH = 'double or trailing slash' + private_constant :ERR_DOUBLE_TRAILING_SLASH + + # + # Returns nil for a valid Reference, or a non-nil error value for an + # invalid Reference. 
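The reference syntax described above maps onto parsed components as follows; a few illustrative values, consistent with the implementation later in this file:

ref1 = LaunchDarkly::Reference.create("key")
ref1.error        # => nil
ref1.depth        # => 1
ref1.component(0) # => :key

ref2 = LaunchDarkly::Reference.create("/address/street/line1")
ref2.depth        # => 3
ref2.component(1) # => :street

ref3 = LaunchDarkly::Reference.create("/good~1bad")
ref3.component(0) # => :"good/bad"

bad = LaunchDarkly::Reference.create("")
bad.error         # => "empty reference"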
+ # + # A Reference is invalid if the input string is empty, or starts with a + # slash but is not a valid slash-delimited path, or starts with a slash and + # contains an invalid escape sequence. + # + # Otherwise, the Reference is valid, but that does not guarantee that such + # an attribute exists in any given Context. For instance, + # Reference.create("name") is a valid Reference, but a specific Context + # might or might not have a name. + # + # See comments on the Reference type for more details of the attribute + # reference syntax. + # + # @return [String, nil] + # + attr_reader :error + + # + # Returns the attribute reference as a string, in the same format provided + # to {#create}. + # + # If the Reference was created with {#create}, this value is identical to + # the original string. If it was created with {#create_literal}, the value + # may be different due to unescaping (for instance, an attribute whose name + # is "/a" would be represented as "~1a"). + # + # @return [String, nil] + # + attr_reader :raw_path + + def initialize(raw_path, components = [], error = nil) + @raw_path = raw_path + # @type [Array] + @components = components + @error = error + end + private_class_method :new + + # + # Creates a Reference from a string. For the supported syntax and examples, + # see comments on the Reference type. + # + # This constructor always returns a Reference that preserves the original + # string, even if validation fails, so that accessing {#raw_path} (or + # serializing the Reference to JSON) will produce the original string. If + # validation fails, {#error} will return a non-nil error and any SDK method + # that takes this Reference as a parameter will consider it invalid. + # + # @param value [String, Symbol] + # @return [Reference] + # + def self.create(value) + unless value.is_a?(String) || value.is_a?(Symbol) + return new(value, [], ERR_EMPTY) + end + + value = value.to_s if value.is_a?(Symbol) + + return new(value, [], ERR_EMPTY) if value.empty? || value == "/" + + unless value.start_with? "/" + return new(value, [value.to_sym]) + end + + if value.end_with? "/" + return new(value, [], ERR_DOUBLE_TRAILING_SLASH) + end + + components = [] + value[1..].split("/").each do |component| + if component.empty? + return new(value, [], ERR_DOUBLE_TRAILING_SLASH) + end + + path, error = unescape_path(component) + + if error + return new(value, [], error) + end + + components << path.to_sym + end + + new(value, components) + end + + # + # create_literal is similar to {#create} except that it always + # interprets the string as a literal attribute name, never as a + # slash-delimited path expression. There is no escaping or unescaping, even + # if the name contains literal '/' or '~' characters. Since an attribute + # name can contain any characters, this method always returns a valid + # Reference unless the name is empty. + # + # For example: Reference.create_literal("name") is exactly equivalent to + # Reference.create("name"). Reference.create_literal("a/b") is exactly + # equivalent to Reference.create("a/b") (since the syntax used by {#create} + # treats the whole string as a literal as long as it does not start with a + # slash), or to Reference.create("/a~1b"). + # + # @param value [String, Symbol] + # @return [Reference] + # + def self.create_literal(value) + unless value.is_a?(String) || value.is_a?(Symbol) + return new(value, [], ERR_EMPTY) + end + + value = value.to_s if value.is_a?(Symbol) + + return new(value, [], ERR_EMPTY) if value.empty? 
+ return new(value, [value.to_sym]) if value[0] != '/' + + escaped = "/" + value.gsub('~', '~0').gsub('/', '~1') + new(escaped, [value.to_sym]) + end + + # + # Returns the number of path components in the Reference. + # + # For a simple attribute reference such as "name" with no leading slash, + # this returns 1. + # + # For an attribute reference with a leading slash, it is the number of + # slash-delimited path components after the initial slash. For instance, + # NewRef("/a/b").Depth() returns 2. + # + # @return [Integer] + # + def depth + @components.size + end + + # + # Retrieves a single path component from the attribute reference. + # + # For a simple attribute reference such as "name" with no leading slash, if + # index is zero, {#component} returns the attribute name as a symbol. + # + # For an attribute reference with a leading slash, if index is non-negative + # and less than {#depth}, Component returns the path component as a symbol. + # + # If index is out of range, it returns nil. + # + # Reference.create("a").component(0) # returns "a" + # Reference.create("/a/b").component(1) # returns "b" + # + # @param index [Integer] + # @return [Symbol, nil] + # + def component(index) + return nil if index < 0 || index >= depth + + @components[index] + end + + # + # Performs unescaping of attribute reference path components: + # + # "~1" becomes "/" + # "~0" becomes "~" + # "~" followed by any character other than "0" or "1" is invalid + # + # This method returns an array of two values. The first element of the + # array is the path if unescaping was valid; otherwise, it will be nil. The + # second value is an error string, or nil if the unescaping was successful. + # + # @param path [String] + # @return [Array([String, nil], [String, nil])] Returns a fixed size array. + # + private_class_method def self.unescape_path(path) + # If there are no tildes then there's definitely nothing to do + return path, nil unless path.include? '~' + + out = "" + i = 0 + while i < path.size + if path[i] != "~" + out << path[i] + i += 1 + next + end + + return nil, ERR_INVALID_ESCAPE_SEQUENCE if i + 1 == path.size + + case path[i + 1] + when '0' + out << "~" + when '1' + out << '/' + else + return nil, ERR_INVALID_ESCAPE_SEQUENCE + end + + i += 2 + end + + [out, nil] + end + end +end diff --git a/lib/ldclient-rb/requestor.rb b/lib/ldclient-rb/requestor.rb index 7d3c4cb9..58db38ab 100644 --- a/lib/ldclient-rb/requestor.rb +++ b/lib/ldclient-rb/requestor.rb @@ -33,7 +33,7 @@ def request_all_data() all_data = JSON.parse(make_request("/sdk/latest-all"), symbolize_names: true) Impl::Model.make_all_store_data(all_data, @config.logger) end - + def stop begin @http_client.close @@ -53,11 +53,11 @@ def make_request(path) Impl::Util.default_http_headers(@sdk_key, @config).each { |k, v| headers[k] = v } headers["Connection"] = "keep-alive" cached = @cache.read(uri) - if !cached.nil? + unless cached.nil? headers["If-None-Match"] = cached.etag end response = @http_client.request("GET", uri, { - headers: headers + headers: headers, }) status = response.status.code # must fully read body for persistent connections @@ -72,7 +72,7 @@ def make_request(path) end body = fix_encoding(body, response.headers["content-type"]) etag = response.headers["etag"] - @cache.write(uri, CacheEntry.new(etag, body)) if !etag.nil? + @cache.write(uri, CacheEntry.new(etag, body)) unless etag.nil? 
end body end @@ -96,7 +96,7 @@ def parse_content_type(value) break end end - return [parts[0], charset] + [parts[0], charset] end end end diff --git a/lib/ldclient-rb/stream.rb b/lib/ldclient-rb/stream.rb index 5ab3eea8..d6ab5086 100644 --- a/lib/ldclient-rb/stream.rb +++ b/lib/ldclient-rb/stream.rb @@ -17,7 +17,7 @@ module LaunchDarkly # @private KEY_PATHS = { FEATURES => "/flags/", - SEGMENTS => "/segments/" + SEGMENTS => "/segments/", } # @private @@ -41,14 +41,14 @@ def start return @ready unless @started.make_true @config.logger.info { "[LDClient] Initializing stream connection" } - + headers = Impl::Util.default_http_headers(@sdk_key, @config) opts = { headers: headers, read_timeout: READ_TIMEOUT_SECONDS, logger: @config.logger, socket_factory: @config.socket_factory, - reconnect_time: @config.initial_reconnect_delay + reconnect_time: @config.initial_reconnect_delay, } log_connection_started @es = SSE::Client.new(@config.stream_uri + "/all", **opts) do |conn| @@ -60,14 +60,14 @@ def start status = err.status message = Util.http_error_message(status, "streaming connection", "will retry") @config.logger.error { "[LDClient] #{message}" } - if !Util.http_error_recoverable?(status) + unless Util.http_error_recoverable?(status) @ready.set # if client was waiting on us, make it stop waiting - has no effect if already set stop end end } end - + @ready end @@ -96,9 +96,8 @@ def process_message(message) for kind in [FEATURES, SEGMENTS] key = key_for_path(kind, data[:path]) if key - data = data[:data] - Impl::DataModelPreprocessing::Preprocessor.new(@config.logger).preprocess_item!(kind, data) - @feature_store.upsert(kind, data) + item = Impl::Model.deserialize(kind, data[:data], @config.logger) + @feature_store.upsert(kind, item) break end end diff --git a/lib/ldclient-rb/user_filter.rb b/lib/ldclient-rb/user_filter.rb deleted file mode 100644 index b67f6844..00000000 --- a/lib/ldclient-rb/user_filter.rb +++ /dev/null @@ -1,52 +0,0 @@ -require "json" -require "set" - -module LaunchDarkly - # @private - class UserFilter - def initialize(config) - @all_attributes_private = config.all_attributes_private - @private_attribute_names = Set.new(config.private_attribute_names.map(&:to_sym)) - end - - def transform_user_props(user_props) - return nil if user_props.nil? - - user_private_attrs = Set.new((user_props[:privateAttributeNames] || []).map(&:to_sym)) - - filtered_user_props, removed = filter_values(user_props, user_private_attrs, ALLOWED_TOP_LEVEL_KEYS, IGNORED_TOP_LEVEL_KEYS) - custom = user_props[:custom] - if !custom.nil? - filtered_user_props[:custom], removed_custom = filter_values(custom, user_private_attrs) - removed.merge(removed_custom) - end - - unless removed.empty? - # note, :privateAttributeNames is what the developer sets; :privateAttrs is what we send to the server - filtered_user_props[:privateAttrs] = removed.to_a.sort.map { |s| s.to_s } - end - return filtered_user_props - end - - private - - ALLOWED_TOP_LEVEL_KEYS = Set.new([:key, :secondary, :ip, :country, :email, - :firstName, :lastName, :avatar, :name, :anonymous, :custom]) - IGNORED_TOP_LEVEL_KEYS = Set.new([:custom, :key, :anonymous]) - - def filter_values(props, user_private_attrs, allowed_keys = [], keys_to_leave_as_is = []) - is_valid_key = lambda { |key| allowed_keys.empty? 
|| allowed_keys.include?(key) } - removed_keys = Set.new(props.keys.select { |key| - # Note that if is_valid_key returns false, we don't explicitly *remove* the key (which would place - # it in the privateAttrs list) - we just silently drop it when we calculate filtered_hash. - is_valid_key.call(key) && !keys_to_leave_as_is.include?(key) && private_attr?(key, user_private_attrs) - }) - filtered_hash = props.select { |key, value| !removed_keys.include?(key) && is_valid_key.call(key) } - [filtered_hash, removed_keys] - end - - def private_attr?(name, user_private_attrs) - @all_attributes_private || @private_attribute_names.include?(name) || user_private_attrs.include?(name) - end - end -end diff --git a/lib/ldclient-rb/util.rb b/lib/ldclient-rb/util.rb index 5aac9d1e..df4ae191 100644 --- a/lib/ldclient-rb/util.rb +++ b/lib/ldclient-rb/util.rb @@ -4,39 +4,24 @@ module LaunchDarkly # @private module Util - def self.stringify_attrs(hash, attrs) - return hash if hash.nil? - ret = hash - changed = false - attrs.each do |attr| - value = hash[attr] - if !value.nil? && !value.is_a?(String) - ret = hash.clone if !changed - ret[attr] = value.to_s - changed = true - end - end - ret - end - def self.new_http_client(uri_s, config) http_client_options = {} if config.socket_factory http_client_options["socket_class"] = config.socket_factory end proxy = URI.parse(uri_s).find_proxy - if !proxy.nil? + unless proxy.nil? http_client_options["proxy"] = { proxy_address: proxy.host, proxy_port: proxy.port, proxy_username: proxy.user, - proxy_password: proxy.password + proxy_password: proxy.password, } end - return HTTP::Client.new(http_client_options) + HTTP::Client.new(http_client_options) .timeout({ read: config.read_timeout, - connect: config.connect_timeout + connect: config.connect_timeout, }) .persistent(uri_s) end diff --git a/spec/big_segment_store_spec_base.rb b/spec/big_segment_store_spec_base.rb index 29f344a1..b7c627f1 100644 --- a/spec/big_segment_store_spec_base.rb +++ b/spec/big_segment_store_spec_base.rb @@ -17,8 +17,8 @@ # def set_big_segments_metadata(metadata) # # write the metadata to the database, taking @options[:prefix] into account # end -# def set_big_segments(user_hash, includes, excludes) -# # update the include and exclude lists for a user, taking @options[:prefix] into account +# def set_big_segments(context_hash, includes, excludes) +# # update the include and exclude lists for a context, taking @options[:prefix] into account # end # end # @@ -31,14 +31,14 @@ prefix_test_groups = [ ["with default prefix", {}], - ["with specified prefix", { prefix: "testprefix" }] + ["with specified prefix", { prefix: "testprefix" }], ] prefix_test_groups.each do |subgroup_description, prefix_options| context(subgroup_description) do # The following tests are done for each permutation of (default prefix/specified prefix) let(:store_tester) { store_tester_class.new(prefix_options.merge(base_options)) } - let(:fake_user_hash) { "userhash" } + let(:fake_context_hash) { "contexthash" } def with_empty_store store_tester.clear_data @@ -52,57 +52,57 @@ def with_empty_store expected_timestamp = 1234567890 with_empty_store do |store| store_tester.set_big_segments_metadata(LaunchDarkly::Interfaces::BigSegmentStoreMetadata.new(expected_timestamp)) - + actual = store.get_metadata - + expect(actual).not_to be nil expect(actual.last_up_to_date).to eq(expected_timestamp) end end - + it "no value" do with_empty_store do |store| actual = store.get_metadata - + expect(actual).not_to be nil 
expect(actual.last_up_to_date).to be nil end end end - + context "get_membership" do it "not found" do with_empty_store do |store| - membership = store.get_membership(fake_user_hash) + membership = store.get_membership(fake_context_hash) membership = {} if membership.nil? - + expect(membership).to eq({}) end end - + it "includes only" do with_empty_store do |store| - store_tester.set_big_segments(fake_user_hash, ["key1", "key2"], []) - - membership = store.get_membership(fake_user_hash) + store_tester.set_big_segments(fake_context_hash, ["key1", "key2"], []) + + membership = store.get_membership(fake_context_hash) expect(membership).to eq({ "key1" => true, "key2" => true }) end end - + it "excludes only" do with_empty_store do |store| - store_tester.set_big_segments(fake_user_hash, [], ["key1", "key2"]) - - membership = store.get_membership(fake_user_hash) + store_tester.set_big_segments(fake_context_hash, [], ["key1", "key2"]) + + membership = store.get_membership(fake_context_hash) expect(membership).to eq({ "key1" => false, "key2" => false }) end end - + it "includes and excludes" do with_empty_store do |store| - store_tester.set_big_segments(fake_user_hash, ["key1", "key2"], ["key2", "key3"]) - - membership = store.get_membership(fake_user_hash) + store_tester.set_big_segments(fake_context_hash, ["key1", "key2"], ["key2", "key3"]) + + membership = store.get_membership(fake_context_hash) expect(membership).to eq({ "key1" => true, "key2" => true, "key3" => false }) # include of key2 overrides exclude end end diff --git a/spec/config_spec.rb b/spec/config_spec.rb index 2b66e8b9..2196bcad 100644 --- a/spec/config_spec.rb +++ b/spec/config_spec.rb @@ -76,7 +76,7 @@ end it "will drop invalid values" do - [" ", "@", ":", "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-a"]. 
each do |value| + [" ", "@", ":", "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-a"].each do |value| expect(subject.new(logger: $null_log, application: { id: value, version: value }).application).to eq ({ id: "", version: "" }) end end @@ -86,11 +86,37 @@ { :id => "id", :version => "version", :expected => "application-id/id application-version/version" }, { :id => "id", :version => "", :expected => "application-id/id" }, { :id => "", :version => "version", :expected => "application-version/version" }, - { :id => "", :version => "", :expected => "" } + { :id => "", :version => "", :expected => "" }, ].each do |test_case| config = subject.new(application: { id: test_case[:id], version: test_case[:version] }) expect(LaunchDarkly::Impl::Util.application_header_value(config.application)).to eq test_case[:expected] end end end + + describe "context and user aliases" do + it "default values are aliased correctly" do + expect(LaunchDarkly::Config.default_context_keys_capacity).to eq LaunchDarkly::Config.default_user_keys_capacity + expect(LaunchDarkly::Config.default_context_keys_flush_interval).to eq LaunchDarkly::Config.default_user_keys_flush_interval + end + + it "context options are reflected in user options" do + config = subject.new(context_keys_capacity: 50, context_keys_flush_interval: 25) + expect(config.context_keys_capacity).to eq config.user_keys_capacity + expect(config.context_keys_flush_interval).to eq config.user_keys_flush_interval + end + + it "context options can be set by user options" do + config = subject.new(user_keys_capacity: 50, user_keys_flush_interval: 25) + expect(config.context_keys_capacity).to eq config.user_keys_capacity + expect(config.context_keys_flush_interval).to eq config.user_keys_flush_interval + end + + it "context options take precedence" do + config = subject.new(context_keys_capacity: 100, user_keys_capacity: 50, context_keys_flush_interval: 100, user_keys_flush_interval: 50) + + expect(config.context_keys_capacity).to eq 100 + expect(config.context_keys_flush_interval).to eq 100 + end + end end diff --git a/spec/context_spec.rb b/spec/context_spec.rb new file mode 100644 index 00000000..ae1c9cfd --- /dev/null +++ b/spec/context_spec.rb @@ -0,0 +1,369 @@ +require "ldclient-rb/context" + +describe LaunchDarkly::LDContext do + subject { LaunchDarkly::LDContext } + + it "returns nil for any value if invalid" do + result = subject.create({ key: "", kind: "user", name: "testing" }) + + expect(result.valid?).to be false + + expect(result.key).to be_nil + expect(result.get_value(:key)).to be_nil + + expect(result.kind).to be_nil + expect(result.get_value(:kind)).to be_nil + + expect(result.get_value(:name)).to be_nil + end + + describe "context construction" do + describe "legacy users contexts" do + it "can be created using the legacy user format" do + context = { + key: "user-key", + custom: { + address: { + street: "123 Main St.", + city: "Every City", + state: "XX", + }, + }, + } + result = subject.create(context) + expect(result).to be_a(LaunchDarkly::LDContext) + expect(result.key).to eq("user-key") + expect(result.kind).to eq("user") + expect(result.valid?).to be true + end + + it "allows an empty string for a key, but it cannot be missing or nil" do + expect(subject.create({ key: "" }).valid?).to be true + expect(subject.create({ key: nil }).valid?).to be false + expect(subject.create({}).valid?).to be false + end + + it "anonymous is required to be a boolean or nil" do + expect(subject.create({ key: "" }).valid?).to be true + 
expect(subject.create({ key: "", anonymous: true }).valid?).to be true + expect(subject.create({ key: "", anonymous: false }).valid?).to be true + expect(subject.create({ key: "", anonymous: 0 }).valid?).to be false + end + + it "name is required to be a string or nil" do + expect(subject.create({ key: "" }).valid?).to be true + expect(subject.create({ key: "", name: "My Name" }).valid?).to be true + expect(subject.create({ key: "", name: 0 }).valid?).to be false + end + + it "creates the correct fully qualified key" do + expect(subject.create({ key: "user-key" }).fully_qualified_key).to eq("user-key") + end + + it "requires privateAttributeNames to be an array" do + context = { + key: "user-key", + privateAttributeNames: "not an array", + } + expect(subject.create(context).valid?).to be false + end + + it "overwrite custom properties with built-ins when collisions occur" do + context = { + key: "user-key", + ip: "192.168.1.1", + avatar: "avatar", + custom: { + ip: "127.0.0.1", + avatar: "custom avatar", + }, + } + + result = subject.create(context) + expect(result.get_value(:ip)).to eq("192.168.1.1") + expect(result.get_value(:avatar)).to eq("avatar") + end + end + + describe "single kind contexts" do + it "can be created using the new format" do + context = { + key: "launchdarkly", + kind: "org", + address: { + street: "1999 Harrison St Suite 1100", + city: "Oakland", + state: "CA", + zip: "94612", + }, + } + result = subject.create(context) + expect(result).to be_a(LaunchDarkly::LDContext) + expect(result.key).to eq("launchdarkly") + expect(result.kind).to eq("org") + expect(result.valid?).to be true + end + + it "do not allow empty strings or nil values for keys" do + expect(subject.create({ kind: "user", key: "" }).valid?).to be false + expect(subject.create({ kind: "user", key: nil }).valid?).to be false + expect(subject.create({ kind: "user" }).valid?).to be false + end + + it "does not allow reserved names or empty values for kind" do + expect(subject.create({ kind: true, key: "key" }).valid?).to be false + expect(subject.create({ kind: "", key: "key" }).valid?).to be false + expect(subject.create({ kind: "kind", key: "key" }).valid?).to be false + expect(subject.create({ kind: "multi", key: "key" }).valid?).to be false + end + + it "anonymous is required to be a boolean or nil" do + expect(subject.create({ key: "key", kind: "user" }).valid?).to be true + expect(subject.create({ key: "key", kind: "user", anonymous: nil }).valid?).to be false + expect(subject.create({ key: "key", kind: "user", anonymous: true }).valid?).to be true + expect(subject.create({ key: "key", kind: "user", anonymous: false }).valid?).to be true + expect(subject.create({ key: "key", kind: "user", anonymous: 0 }).valid?).to be false + end + + it "name is required to be a string or nil" do + expect(subject.create({ key: "key", kind: "user" }).valid?).to be true + expect(subject.create({ key: "key", kind: "user", name: "My Name" }).valid?).to be true + expect(subject.create({ key: "key", kind: "user", name: 0 }).valid?).to be false + end + + it "require privateAttributes to be an array" do + context = { + key: "user-key", + kind: "user", + _meta: { + privateAttributes: "not an array", + }, + } + expect(subject.create(context).valid?).to be false + end + + it "creates the correct fully qualified key" do + expect(subject.create({ key: "user-key", kind: "user" }).fully_qualified_key).to eq("user-key") + expect(subject.create({ key: "org-key", kind: "org" }).fully_qualified_key).to eq("org:org-key") + end + end + + 
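+    # A hedged usage sketch (assumed application-side code, not part of this
+    # spec): per the assertions above, the canonical key is the bare key for
+    # the default "user" kind and "<kind>:<key>" for any other kind, e.g.
+    #
+    #   org = LaunchDarkly::LDContext.create({ key: "org-key", kind: "org" })
+    #   org.kind                 # => "org"
+    #   org.fully_qualified_key  # => "org:org-key"
+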
describe "multi-kind contexts" do + it "can be created from single kind contexts" do + user_context = subject.create({ key: "user-key" }) + org_context = subject.create({ key: "org-key", kind: "org" }) + multi_context = subject.create_multi([user_context, org_context]) + + expect(multi_context).to be_a(LaunchDarkly::LDContext) + expect(multi_context.key).to be_nil + expect(multi_context.kind).to eq("multi") + expect(multi_context.valid?).to be true + end + + it "can be created from a hash" do + data = { kind: "multi", user_context: { key: "user-key"}, org: { key: "org-key"}} + multi_context = subject.create(data) + + expect(multi_context).to be_a(LaunchDarkly::LDContext) + expect(multi_context.key).to be_nil + expect(multi_context.kind).to eq(LaunchDarkly::LDContext::KIND_MULTI) + expect(multi_context.valid?).to be true + end + + it "will return the single kind context if only one is provided" do + user_context = subject.create({ key: "user-key" }) + multi_context = subject.create_multi([user_context]) + + expect(multi_context).to be_a(LaunchDarkly::LDContext) + expect(multi_context).to eq(user_context) + end + + it "cannot include another multi-kind context" do + user_context = subject.create({ key: "user-key" }) + org_context = subject.create({ key: "org-key", kind: "org" }) + embedded_multi_context = subject.create_multi([user_context, org_context]) + multi_context = subject.create_multi([embedded_multi_context]) + + expect(multi_context).to be_a(LaunchDarkly::LDContext) + expect(multi_context.valid?).to be false + end + + it "are invalid if no contexts are provided" do + multi_context = subject.create_multi([]) + expect(multi_context.valid?).to be false + end + + it "are invalid if a single context is invalid" do + valid_context = subject.create({ kind: "user", key: "user-key" }) + invalid_context = subject.create({ kind: "org" }) + multi_context = subject.create_multi([valid_context, invalid_context]) + + expect(valid_context.valid?).to be true + expect(invalid_context.valid?).to be false + expect(multi_context.valid?).to be false + end + + it "creates the correct fully qualified key" do + user_context = subject.create({ key: "a-user-key" }) + org_context = subject.create({ key: "b-org-key", kind: "org" }) + user_first = subject.create_multi([user_context, org_context]) + org_first = subject.create_multi([org_context, user_context]) + + # Verify we are sorting contexts by kind when generating the canonical key + expect(user_first.fully_qualified_key).to eq("org:b-org-key:user:a-user-key") + expect(org_first.fully_qualified_key).to eq("org:b-org-key:user:a-user-key") + end + end + end + + describe "context counts" do + it "invalid contexts have a size of 0" do + context = subject.create({}) + + expect(context.valid?).to be false + expect(context.individual_context_count).to eq(0) + end + + it "individual contexts have a size of 1" do + context = subject.create({ kind: "user", key: "user-key" }) + expect(context.individual_context_count).to eq(1) + end + + it "multi-kind contexts have a size equal to the single-kind contexts" do + user_context = subject.create({ key: "user-key", kind: "user" }) + org_context = subject.create({ key: "org-key", kind: "org" }) + multi_context = subject.create_multi([user_context, org_context]) + + expect(multi_context.individual_context_count).to eq(2) + end + end + + describe "retrieving specific contexts" do + it "invalid contexts always return nil" do + context = subject.create({kind: "user"}) + + expect(context.valid?).to be false + 
expect(context.individual_context(-1)).to be_nil + expect(context.individual_context(0)).to be_nil + expect(context.individual_context(1)).to be_nil + + expect(context.individual_context("user")).to be_nil + end + + it "single contexts can retrieve themselves" do + context = subject.create({key: "user-key", kind: "user"}) + + expect(context.valid?).to be true + expect(context.individual_context(-1)).to be_nil + expect(context.individual_context(0)).to eq(context) + expect(context.individual_context(1)).to be_nil + + expect(context.individual_context("user")).to eq(context) + expect(context.individual_context("org")).to be_nil + end + + it "multi-kind contexts can return nested contexts" do + user_context = subject.create({ key: "user-key", kind: "user" }) + org_context = subject.create({ key: "org-key", kind: "org" }) + multi_context = subject.create_multi([user_context, org_context]) + + expect(multi_context.valid?).to be true + expect(multi_context.individual_context(-1)).to be_nil + expect(multi_context.individual_context(0)).to eq(user_context) + expect(multi_context.individual_context(1)).to eq(org_context) + + expect(multi_context.individual_context("user")).to eq(user_context) + expect(multi_context.individual_context("org")).to eq(org_context) + end + end + + describe "value retrieval" do + describe "supports simple attribute retrieval" do + it "can retrieve the correct simple attribute value" do + context = subject.create({ key: "my-key", kind: "org", name: "x", :"my-attr" => "y", :"/starts-with-slash" => "z" }) + + expect(context.get_value("kind")).to eq("org") + expect(context.get_value("key")).to eq("my-key") + expect(context.get_value("name")).to eq("x") + expect(context.get_value("my-attr")).to eq("y") + expect(context.get_value("/starts-with-slash")).to eq("z") + end + + it "does not allow querying subpath/elements" do + object_value = { a: 1 } + array_value = [1] + + context = subject.create({ key: "my-key", kind: "org", :"obj-attr" => object_value, :"array-attr" => array_value }) + expect(context.get_value("obj-attr")).to eq(object_value) + expect(context.get_value(:"array-attr")).to eq(array_value) + + expect(context.get_value(:"/obj-attr/a")).to be_nil + expect(context.get_value(:"/array-attr/0")).to be_nil + end + end + + describe "supports retrieval" do + it "with only support kind for multi-kind contexts" do + user_context = subject.create({ key: 'user', name: 'Ruby', anonymous: true }) + org_context = subject.create({ key: 'ld', kind: 'org', name: 'LaunchDarkly', anonymous: false }) + + multi_context = subject.create_multi([user_context, org_context]) + + [ + ['kind', eq('multi')], + ['key', be_nil], + ['name', be_nil], + ['anonymous', be_nil], + ].each do |(reference, matcher)| + expect(multi_context.get_value_for_reference(LaunchDarkly::Reference.create(reference))).to matcher + end + end + + it "with basic attributes" do + legacy_user = subject.create({ key: 'user', name: 'Ruby', privateAttributeNames: ['name'] }) + org_context = subject.create({ key: 'ld', kind: 'org', name: 'LaunchDarkly', anonymous: true, _meta: { privateAttributes: ['name'] } }) + + [ + # Simple top level attributes are accessible + ['kind', eq('user'), eq('org')], + ['key', eq('user'), eq('ld')], + ['name', eq('Ruby'), eq('LaunchDarkly')], + ['anonymous', eq(false), eq(true)], + + # Cannot access meta data + ['privateAttributeNames', be_nil, be_nil], + ['privateAttributes', be_nil, be_nil], + ].each do |(reference, user_matcher, org_matcher)| + ref = LaunchDarkly::Reference.create(reference) 
+ expect(legacy_user.get_value_for_reference(ref)).to user_matcher + expect(org_context.get_value_for_reference(ref)).to org_matcher + end + end + + it "with complex attributes" do + address = { city: "Oakland", state: "CA", zip: 94612 } + tags = ["LaunchDarkly", "Feature Flags"] + nested = { upper: { middle: { name: "Middle Level", inner: { levels: [0, 1, 2] } }, name: "Upper Level" } } + + legacy_user = subject.create({ key: 'user', name: 'Ruby', custom: { address: address, tags: tags, nested: nested }}) + org_context = subject.create({ key: 'ld', kind: 'org', name: 'LaunchDarkly', anonymous: true, address: address, tags: tags, nested: nested }) + + [ + # Simple top level attributes are accessible + ['/address', eq(address)], + ['/address/city', eq('Oakland')], + + ['/tags', eq(tags)], + + ['/nested/upper/name', eq('Upper Level')], + ['/nested/upper/middle/name', eq('Middle Level')], + ['/nested/upper/middle/inner/levels', eq([0, 1, 2])], + ].each do |(reference, matcher)| + ref = LaunchDarkly::Reference.create(reference) + expect(legacy_user.get_value_for_reference(ref)).to matcher + expect(org_context.get_value_for_reference(ref)).to matcher + end + end + end + end +end diff --git a/spec/diagnostic_events_spec.rb b/spec/diagnostic_events_spec.rb index 7e1bce7f..786e3764 100644 --- a/spec/diagnostic_events_spec.rb +++ b/spec/diagnostic_events_spec.rb @@ -31,14 +31,13 @@ def expected_default_config diagnosticRecordingIntervalMillis: Config.default_diagnostic_recording_interval * 1000, eventsCapacity: Config.default_capacity, eventsFlushIntervalMillis: Config.default_flush_interval * 1000, - inlineUsersInEvents: false, pollingIntervalMillis: Config.default_poll_interval * 1000, socketTimeoutMillis: Config.default_read_timeout * 1000, streamingDisabled: false, - userKeysCapacity: Config.default_user_keys_capacity, - userKeysFlushIntervalMillis: Config.default_user_keys_flush_interval * 1000, + userKeysCapacity: Config.default_context_keys_capacity, + userKeysFlushIntervalMillis: Config.default_context_keys_flush_interval * 1000, usingProxy: false, - usingRelayDaemon: false + usingRelayDaemon: false, } end @@ -64,13 +63,12 @@ def expected_default_config [ { diagnostic_recording_interval: 9999 }, { diagnosticRecordingIntervalMillis: 9999000 } ], [ { capacity: 4000 }, { eventsCapacity: 4000 } ], [ { flush_interval: 46 }, { eventsFlushIntervalMillis: 46000 } ], - [ { inline_users_in_events: true }, { inlineUsersInEvents: true } ], [ { poll_interval: 999 }, { pollingIntervalMillis: 999000 } ], [ { read_timeout: 46 }, { socketTimeoutMillis: 46000 } ], [ { stream: false }, { streamingDisabled: true } ], - [ { user_keys_capacity: 999 }, { userKeysCapacity: 999 } ], - [ { user_keys_flush_interval: 999 }, { userKeysFlushIntervalMillis: 999000 } ], - [ { use_ldd: true }, { usingRelayDaemon: true } ] + [ { context_keys_capacity: 999 }, { userKeysCapacity: 999 } ], + [ { context_keys_flush_interval: 999 }, { userKeysFlushIntervalMillis: 999000 } ], + [ { use_ldd: true }, { usingRelayDaemon: true } ], ] changes_and_expected.each do |config_values, expected_values| config = Config.new(config_values) @@ -95,7 +93,7 @@ def expected_default_config event = default_acc.create_init_event(Config.new) expect(event[:sdk]).to eq ({ name: 'ruby-server-sdk', - version: LaunchDarkly::VERSION + version: LaunchDarkly::VERSION, }) end @@ -105,14 +103,14 @@ def expected_default_config name: 'ruby-server-sdk', version: LaunchDarkly::VERSION, wrapperName: 'my-wrapper', - wrapperVersion: '2.0' + wrapperVersion: '2.0', }) 
end it "has expected platform data" do event = default_acc.create_init_event(Config.new) expect(event[:platform]).to include ({ - name: 'ruby' + name: 'ruby', }) end end @@ -127,7 +125,7 @@ def expected_default_config droppedEvents: 2, deduplicatedUsers: 3, eventsInLastBatch: 4, - streamInits: [] + streamInits: [], }) expect(event[:creationDate]).not_to be_nil expect(event[:dataSinceDate]).not_to be_nil @@ -149,14 +147,14 @@ def expected_default_config droppedEvents: 2, deduplicatedUsers: 3, eventsInLastBatch: 4, - streamInits: [{ timestamp: 1000, failed: false, durationMillis: 2000 }] + streamInits: [{ timestamp: 1000, failed: false, durationMillis: 2000 }], }) expect(event2).to include ({ dataSinceDate: event1[:creationDate], droppedEvents: 5, deduplicatedUsers: 6, eventsInLastBatch: 7, - streamInits: [] + streamInits: [], }) end end diff --git a/spec/evaluation_detail_spec.rb b/spec/evaluation_detail_spec.rb index 7b1b6856..df880447 100644 --- a/spec/evaluation_detail_spec.rb +++ b/spec/evaluation_detail_spec.rb @@ -69,7 +69,7 @@ module LaunchDarkly expect(reason).not_to eq values[j][0] end end - if !unequal_values.nil? + unless unequal_values.nil? unequal_values.each do |v| expect(reason).not_to eq v end diff --git a/spec/event_sender_spec.rb b/spec/event_sender_spec.rb index 2b7fe38b..a8325ff1 100644 --- a/spec/event_sender_spec.rb +++ b/spec/event_sender_spec.rb @@ -43,9 +43,9 @@ def with_sender_and_server "authorization" => [ sdk_key ], "content-type" => [ "application/json" ], "user-agent" => [ "RubyClient/" + LaunchDarkly::VERSION ], - "x-launchdarkly-event-schema" => [ "3" ], + "x-launchdarkly-event-schema" => [ "4" ], "x-launchdarkly-tags" => [ "application-id/id application-version/version" ], - "connection" => [ "Keep-Alive" ] + "connection" => [ "Keep-Alive" ], }) expect(req.header['x-launchdarkly-payload-id']).not_to eq [] end @@ -102,7 +102,7 @@ def with_sender_and_server "authorization" => [ sdk_key ], "content-type" => [ "application/json" ], "user-agent" => [ "RubyClient/" + LaunchDarkly::VERSION ], - "connection" => [ "Keep-Alive" ] + "connection" => [ "Keep-Alive" ], }) expect(req.header['x-launchdarkly-event-schema']).to eq [] expect(req.header['x-launchdarkly-payload-id']).to eq [] diff --git a/spec/events_spec.rb b/spec/events_spec.rb index 894c3f70..17a23eb4 100644 --- a/spec/events_spec.rb +++ b/spec/events_spec.rb @@ -11,12 +11,7 @@ let(:starting_timestamp) { 1000 } let(:default_config_opts) { { diagnostic_opt_out: true, logger: $null_log } } let(:default_config) { LaunchDarkly::Config.new(default_config_opts) } - let(:user) { { key: "userkey", name: "Red" } } - let(:filtered_user) { { key: "userkey", privateAttrs: [ "name" ] } } - let(:numeric_user) { { key: 1, secondary: 2, ip: 3, country: 4, email: 5, firstName: 6, lastName: 7, - avatar: 8, name: 9, anonymous: false, custom: { age: 99 } } } - let(:stringified_numeric_user) { { key: '1', secondary: '2', ip: '3', country: '4', email: '5', firstName: '6', - lastName: '7', avatar: '8', name: '9', anonymous: false, custom: { age: 99 } } } + let(:context) { LaunchDarkly::LDContext.create({ kind: "user", key: "userkey", name: "Red" }) } def with_processor_and_sender(config) sender = FakeEventSender.new @@ -27,7 +22,7 @@ def with_processor_and_sender(config) t = timestamp timestamp += 1 t - } + }, }) begin yield ep, sender @@ -38,126 +33,62 @@ def with_processor_and_sender(config) it "queues identify event" do with_processor_and_sender(default_config) do |ep, sender| - ep.record_identify_event(user) + 
ep.record_identify_event(context) output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly(eq(identify_event(user))) + expect(output).to contain_exactly(eq(identify_event(default_config, context))) end end - it "filters user in identify event" do + it "filters context in identify event" do config = LaunchDarkly::Config.new(default_config_opts.merge(all_attributes_private: true)) with_processor_and_sender(config) do |ep, sender| - ep.record_identify_event(user) + ep.record_identify_event(context) output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly(eq(identify_event(filtered_user))) - end - end - - it "stringifies built-in user attributes in identify event" do - with_processor_and_sender(default_config) do |ep, sender| - ep.record_identify_event(numeric_user) - - output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly(eq(identify_event(stringified_numeric_user))) + expect(output).to contain_exactly(eq(identify_event(config, context))) end end it "queues individual feature event with index event" do with_processor_and_sender(default_config) do |ep, sender| flag = { key: "flagkey", version: 11 } - ep.record_eval_event(user, 'flagkey', 11, 1, 'value', nil, nil, true) + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true) output = flush_and_get_events(ep, sender) expect(output).to contain_exactly( - eq(index_event(user)), - eq(feature_event(flag, user, 1, 'value')), + eq(index_event(default_config, context)), + eq(feature_event(flag, context, 1, 'value')), include(:kind => "summary") ) end end - it "filters user in index event" do + it "filters context in index event" do config = LaunchDarkly::Config.new(default_config_opts.merge(all_attributes_private: true)) with_processor_and_sender(config) do |ep, sender| flag = { key: "flagkey", version: 11 } - ep.record_eval_event(user, 'flagkey', 11, 1, 'value', nil, nil, true) - - output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly( - eq(index_event(filtered_user)), - eq(feature_event(flag, user, 1, 'value')), - include(:kind => "summary") - ) - end - end - - it "stringifies built-in user attributes in index event" do - with_processor_and_sender(default_config) do |ep, sender| - flag = { key: "flagkey", version: 11 } - ep.record_eval_event(numeric_user, 'flagkey', 11, 1, 'value', nil, nil, true) - - output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly( - eq(index_event(stringified_numeric_user)), - eq(feature_event(flag, stringified_numeric_user, 1, 'value')), - include(:kind => "summary") - ) - end - end - - it "can include inline user in feature event" do - config = LaunchDarkly::Config.new(default_config_opts.merge(inline_users_in_events: true)) - with_processor_and_sender(config) do |ep, sender| - flag = { key: "flagkey", version: 11 } - ep.record_eval_event(user, 'flagkey', 11, 1, 'value', nil, nil, true) - - output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly( - eq(feature_event(flag, user, 1, 'value', true)), - include(:kind => "summary") - ) - end - end - - it "stringifies built-in user attributes in feature event" do - config = LaunchDarkly::Config.new(default_config_opts.merge(inline_users_in_events: true)) - with_processor_and_sender(config) do |ep, sender| - flag = { key: "flagkey", version: 11 } - ep.record_eval_event(numeric_user, 'flagkey', 11, 1, 'value', nil, nil, true) - - output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly( - 
eq(feature_event(flag, stringified_numeric_user, 1, 'value', true)), - include(:kind => "summary") - ) - end - end - - it "filters user in feature event" do - config = LaunchDarkly::Config.new(default_config_opts.merge(all_attributes_private: true, inline_users_in_events: true)) - with_processor_and_sender(config) do |ep, sender| - flag = { key: "flagkey", version: 11 } - ep.record_eval_event(user, 'flagkey', 11, 1, 'value', nil, nil, true) + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true) output = flush_and_get_events(ep, sender) expect(output).to contain_exactly( - eq(feature_event(flag, filtered_user, 1, 'value', true)), + eq(index_event(config, context)), + eq(feature_event(flag, context, 1, 'value')), include(:kind => "summary") ) end end - it "still generates index event if inline_users is true but feature event was not tracked" do - config = LaunchDarkly::Config.new(default_config_opts.merge(inline_users_in_events: true)) + it "filters context in feature event" do + config = LaunchDarkly::Config.new(default_config_opts.merge(all_attributes_private: true)) with_processor_and_sender(config) do |ep, sender| flag = { key: "flagkey", version: 11 } - ep.record_eval_event(user, 'flagkey', 11, 1, 'value', nil, nil, false) + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true) output = flush_and_get_events(ep, sender) expect(output).to contain_exactly( - eq(index_event(user)), + eq(index_event(config, context)), + eq(feature_event(flag, context, 1, 'value')), include(:kind => "summary") ) end @@ -167,12 +98,12 @@ def with_processor_and_sender(config) with_processor_and_sender(default_config) do |ep, sender| flag = { key: "flagkey", version: 11 } future_time = (Time.now.to_f * 1000).to_i + 1000000 - ep.record_eval_event(user, 'flagkey', 11, 1, 'value', nil, nil, false, future_time) + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, false, future_time) output = flush_and_get_events(ep, sender) expect(output).to contain_exactly( - eq(index_event(user)), - eq(debug_event(flag, user, 1, 'value')), + eq(index_event(default_config, context)), + eq(debug_event(default_config, flag, context, 1, 'value')), include(:kind => "summary") ) end @@ -182,13 +113,13 @@ def with_processor_and_sender(config) with_processor_and_sender(default_config) do |ep, sender| flag = { key: "flagkey", version: 11 } future_time = (Time.now.to_f * 1000).to_i + 1000000 - ep.record_eval_event(user, 'flagkey', 11, 1, 'value', nil, nil, true, future_time) + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, true, future_time) output = flush_and_get_events(ep, sender) expect(output).to contain_exactly( - eq(index_event(user)), - eq(feature_event(flag, user, 1, 'value')), - eq(debug_event(flag, user, 1, 'value')), + eq(index_event(default_config, context)), + eq(feature_event(flag, context, 1, 'value')), + eq(debug_event(default_config, flag, context, 1, 'value')), include(:kind => "summary") ) end @@ -201,15 +132,15 @@ def with_processor_and_sender(config) # Send and flush an event we don't care about, just to set the last server time sender.result = LaunchDarkly::Impl::EventSenderResult.new(true, false, server_time) - - ep.record_identify_event(user) + + ep.record_identify_event(context) flush_and_get_events(ep, sender) # Now send an event with debug mode on, with a "debug until" time that is further in # the future than the server time, but in the past compared to the client. 
flag = { key: "flagkey", version: 11 } debug_until = (server_time.to_f * 1000).to_i + 1000 - ep.record_eval_event(user, 'flagkey', 11, 1, 'value', nil, nil, false, debug_until) + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, false, debug_until) # Should get a summary event only, not a full feature event output = flush_and_get_events(ep, sender) @@ -226,14 +157,14 @@ def with_processor_and_sender(config) # Send and flush an event we don't care about, just to set the last server time sender.result = LaunchDarkly::Impl::EventSenderResult.new(true, false, server_time) - ep.record_identify_event(user) + ep.record_identify_event(context) flush_and_get_events(ep, sender) # Now send an event with debug mode on, with a "debug until" time that is further in # the future than the server time, but in the past compared to the client. flag = { key: "flagkey", version: 11 } debug_until = (server_time.to_f * 1000).to_i - 1000 - ep.record_eval_event(user, 'flagkey', 11, 1, 'value', nil, nil, false, debug_until) + ep.record_eval_event(context, 'flagkey', 11, 1, 'value', nil, nil, false, debug_until) # Should get a summary event only, not a full feature event output = flush_and_get_events(ep, sender) @@ -243,19 +174,19 @@ def with_processor_and_sender(config) end end - it "generates only one index event for multiple events with same user" do + it "generates only one index event for multiple events with same context" do with_processor_and_sender(default_config) do |ep, sender| flag1 = { key: "flagkey1", version: 11 } flag2 = { key: "flagkey2", version: 22 } future_time = (Time.now.to_f * 1000).to_i + 1000000 - ep.record_eval_event(user, 'flagkey1', 11, 1, 'value', nil, nil, true) - ep.record_eval_event(user, 'flagkey2', 22, 1, 'value', nil, nil, true) + ep.record_eval_event(context, 'flagkey1', 11, 1, 'value', nil, nil, true) + ep.record_eval_event(context, 'flagkey2', 22, 1, 'value', nil, nil, true) output = flush_and_get_events(ep, sender) expect(output).to contain_exactly( - eq(index_event(user)), - eq(feature_event(flag1, user, 1, 'value', false, starting_timestamp)), - eq(feature_event(flag2, user, 1, 'value', false, starting_timestamp + 1)), + eq(index_event(default_config, context)), + eq(feature_event(flag1, context, 1, 'value', starting_timestamp)), + eq(feature_event(flag2, context, 1, 'value', starting_timestamp + 1)), include(:kind => "summary") ) end @@ -263,120 +194,80 @@ def with_processor_and_sender(config) it "summarizes non-tracked events" do with_processor_and_sender(default_config) do |ep, sender| - flag1 = { key: "flagkey1", version: 11 } - flag2 = { key: "flagkey2", version: 22 } - future_time = (Time.now.to_f * 1000).to_i + 1000000 - ep.record_eval_event(user, 'flagkey1', 11, 1, 'value1', nil, 'default1', false) - ep.record_eval_event(user, 'flagkey2', 22, 2, 'value2', nil, 'default2', false) + ep.record_eval_event(context, 'flagkey1', 11, 1, 'value1', nil, 'default1', false) + ep.record_eval_event(context, 'flagkey2', 22, 2, 'value2', nil, 'default2', false) output = flush_and_get_events(ep, sender) expect(output).to contain_exactly( - eq(index_event(user)), + eq(index_event(default_config, context)), eq({ kind: "summary", startDate: starting_timestamp, endDate: starting_timestamp + 1, features: { flagkey1: { + contextKinds: ["user"], default: "default1", counters: [ - { version: 11, variation: 1, value: "value1", count: 1 } - ] + { version: 11, variation: 1, value: "value1", count: 1 }, + ], }, flagkey2: { + contextKinds: ["user"], default: "default2", counters: [ 
- { version: 22, variation: 2, value: "value2", count: 1 } - ] - } - } + { version: 22, variation: 2, value: "value2", count: 1 }, + ], + }, + }, }) ) end end - it "queues custom event with user" do + it "queues custom event with context" do with_processor_and_sender(default_config) do |ep, sender| - ep.record_custom_event(user, 'eventkey', { thing: 'stuff' }, 1.5) - - output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly( - eq(index_event(user)), - eq(custom_event(user, 'eventkey', { thing: 'stuff' }, 1.5)) - ) - end - end - - it "can include inline user in custom event" do - config = LaunchDarkly::Config.new(default_config_opts.merge(inline_users_in_events: true)) - with_processor_and_sender(config) do |ep, sender| - ep.record_custom_event(user, 'eventkey') - - output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly( - eq(custom_event(user, 'eventkey', nil, nil, true)) - ) - end - end - - it "filters user in custom event" do - config = LaunchDarkly::Config.new(default_config_opts.merge(all_attributes_private: true, inline_users_in_events: true)) - with_processor_and_sender(config) do |ep, sender| - ep.record_custom_event(user, 'eventkey') + ep.record_custom_event(context, 'eventkey', { thing: 'stuff' }, 1.5) output = flush_and_get_events(ep, sender) expect(output).to contain_exactly( - eq(custom_event(filtered_user, 'eventkey', nil, nil, true)) + eq(index_event(default_config, context)), + eq(custom_event(context, 'eventkey', { thing: 'stuff' }, 1.5)) ) end end - it "stringifies built-in user attributes in custom event" do - config = LaunchDarkly::Config.new(default_config_opts.merge(inline_users_in_events: true)) + it "filters context in custom event" do + config = LaunchDarkly::Config.new(default_config_opts.merge(all_attributes_private: true)) with_processor_and_sender(config) do |ep, sender| - ep.record_custom_event(numeric_user, 'eventkey', nil, nil) + ep.record_custom_event(context, 'eventkey') output = flush_and_get_events(ep, sender) expect(output).to contain_exactly( - eq(custom_event(stringified_numeric_user, 'eventkey', nil, nil, true)) + eq(index_event(config, context)), + eq(custom_event(context, 'eventkey', nil, nil)) ) end end - it "queues alias event" do - with_processor_and_sender(default_config) do |ep, sender| - ep.record_alias_event({ key: 'a' }, { key: 'b', anonymous: true }) - - output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly({ - creationDate: starting_timestamp, - kind: 'alias', - key: 'a', - contextKind: 'user', - previousKey: 'b', - previousContextKind: 'anonymousUser' - }) - end - end - it "treats nil value for custom the same as an empty hash" do with_processor_and_sender(default_config) do |ep, sender| - user_with_nil_custom = { key: "userkey", custom: nil } + user_with_nil_custom = LaunchDarkly::LDContext.create({ key: "userkey", custom: nil }) ep.record_identify_event(user_with_nil_custom) output = flush_and_get_events(ep, sender) - expect(output).to contain_exactly(eq(identify_event(user_with_nil_custom))) + expect(output).to contain_exactly(eq(identify_event(default_config, user_with_nil_custom))) end end it "does a final flush when shutting down" do with_processor_and_sender(default_config) do |ep, sender| - ep.record_identify_event(user) - + ep.record_identify_event(context) + ep.stop output = sender.analytics_payloads.pop - expect(output).to contain_exactly(eq(identify_event(user))) + expect(output).to contain_exactly(eq(identify_event(default_config, context))) end end @@ 
-391,10 +282,10 @@ def with_processor_and_sender(config) it "stops posting events after unrecoverable error" do with_processor_and_sender(default_config) do |ep, sender| sender.result = LaunchDarkly::Impl::EventSenderResult.new(false, true, nil) - e = ep.record_identify_event(user) + e = ep.record_identify_event(context) flush_and_get_events(ep, sender) - ep.record_identify_event(user) + ep.record_identify_event(context) ep.flush ep.wait_until_inactive expect(sender.analytics_payloads.empty?).to be true @@ -422,7 +313,7 @@ def with_diagnostic_processor_and_sender(config) event = sender.diagnostic_payloads.pop expect(event).to include({ kind: 'diagnostic-init', - id: default_id + id: default_id, }) end end @@ -437,7 +328,7 @@ def with_diagnostic_processor_and_sender(config) droppedEvents: 0, deduplicatedUsers: 0, eventsInLastBatch: 0, - streamInits: [] + streamInits: [], }) end end @@ -448,7 +339,7 @@ def with_diagnostic_processor_and_sender(config) init_event = sender.diagnostic_payloads.pop 3.times do - ep.record_identify_event(user) + ep.record_identify_event(context) end flush_and_get_events(ep, sender) @@ -456,63 +347,93 @@ def with_diagnostic_processor_and_sender(config) expect(periodic_event).to include({ kind: 'diagnostic', droppedEvents: 1, - eventsInLastBatch: 2 + eventsInLastBatch: 2, }) end end - it "counts deduplicated users" do + it "counts deduplicated contexts" do with_diagnostic_processor_and_sender(diagnostic_config) do |ep, sender| - init_event = sender.diagnostic_payloads.pop + sender.diagnostic_payloads.pop - ep.record_custom_event(user, 'event1') - ep.record_custom_event(user, 'event2') - events = flush_and_get_events(ep, sender) + ep.record_custom_event(context, 'event1') + ep.record_custom_event(context, 'event2') + flush_and_get_events(ep, sender) periodic_event = sender.diagnostic_payloads.pop expect(periodic_event).to include({ kind: 'diagnostic', - deduplicatedUsers: 1 + deduplicatedUsers: 1, }) end end end - def index_event(user, timestamp = starting_timestamp) - { + # + # @param config [LaunchDarkly::Config] + # @param context [LaunchDarkly::LDContext] + # @param timestamp [Integer] + # @return [Hash] + # + def index_event(config, context, timestamp = starting_timestamp) + context_filter = LaunchDarkly::Impl::ContextFilter.new(config.all_attributes_private, config.private_attributes) + out = { kind: "index", creationDate: timestamp, - user: user + context: context_filter.filter(context), } + JSON.parse(out.to_json, symbolize_names: true) end - def identify_event(user, timestamp = starting_timestamp) - { + # + # @param config [LaunchDarkly::Config] + # @param context [LaunchDarkly::LDContext] + # @param timestamp [Integer] + # @return [Hash] + # + def identify_event(config, context, timestamp = starting_timestamp) + context_filter = LaunchDarkly::Impl::ContextFilter.new(config.all_attributes_private, config.private_attributes) + out = { kind: "identify", creationDate: timestamp, - key: user[:key], - user: user + key: context.fully_qualified_key, + context: context_filter.filter(context), } - end - - def feature_event(flag, user, variation, value, inline_user = false, timestamp = starting_timestamp) + JSON.parse(out.to_json, symbolize_names: true) + end + + # + # @param flag [Hash] + # @param context [LaunchDarkly::LDContext] + # @param variation [Integer] + # @param value [any] + # @param timestamp [Integer] + # @return [Hash] + # + def feature_event(flag, context, variation, value, timestamp = starting_timestamp) out = { kind: 'feature', creationDate: 
timestamp, + contextKeys: context.keys, key: flag[:key], variation: variation, version: flag[:version], - value: value + value: value, } - if inline_user - out[:user] = user - else - out[:userKey] = user[:key] - end - out - end - - def debug_event(flag, user, variation, value, timestamp = starting_timestamp) + JSON.parse(out.to_json, symbolize_names: true) + end + + # + # @param config [LaunchDarkly::Config] + # @param flag [Hash] + # @param context [LaunchDarkly::LDContext] + # @param variation [Integer] + # @param value [any] + # @param timestamp [Integer] + # @return [Hash] + # + def debug_event(config, flag, context, variation, value, timestamp = starting_timestamp) + context_filter = LaunchDarkly::Impl::ContextFilter.new(config.all_attributes_private, config.private_attributes) out = { kind: 'debug', creationDate: timestamp, @@ -520,25 +441,29 @@ def debug_event(flag, user, variation, value, timestamp = starting_timestamp) variation: variation, version: flag[:version], value: value, - user: user + context: context_filter.filter(context), } - out + JSON.parse(out.to_json, symbolize_names: true) end - def custom_event(user, key, data, metric_value, inline_user = false, timestamp = starting_timestamp) + # + # @param context [LaunchDarkly::LDContext] + # @param key [String] + # @param data [any] + # @param metric_value [any] + # @return [Hash] + # + def custom_event(context, key, data, metric_value) out = { kind: "custom", - creationDate: timestamp, - key: key + creationDate: starting_timestamp, + contextKeys: context.keys, + key: key, } - out[:data] = data if !data.nil? - if inline_user - out[:user] = user - else - out[:userKey] = user[:key] - end - out[:metricValue] = metric_value if !metric_value.nil? - out + out[:data] = data unless data.nil? + out[:metricValue] = metric_value unless metric_value.nil? 
+ + JSON.parse(out.to_json, symbolize_names: true) end def flush_and_get_events(ep, sender) diff --git a/spec/events_test_util.rb b/spec/events_test_util.rb index 66b5b97d..672360b3 100644 --- a/spec/events_test_util.rb +++ b/spec/events_test_util.rb @@ -1,19 +1,15 @@ require "ldclient-rb/impl/event_types" -def make_eval_event(timestamp, user, key, version = nil, variation = nil, value = nil, reason = nil, +def make_eval_event(timestamp, context, key, version = nil, variation = nil, value = nil, reason = nil, default = nil, track_events = false, debug_until = nil, prereq_of = nil) - LaunchDarkly::Impl::EvalEvent.new(timestamp, user, key, version, variation, value, reason, + LaunchDarkly::Impl::EvalEvent.new(timestamp, context, key, version, variation, value, reason, default, track_events, debug_until, prereq_of) end -def make_identify_event(timestamp, user) - LaunchDarkly::Impl::IdentifyEvent.new(timestamp, user) +def make_identify_event(timestamp, context) + LaunchDarkly::Impl::IdentifyEvent.new(timestamp, context) end -def make_custom_event(timestamp, user, key, data = nil, metric_value = nil) - LaunchDarkly::Impl::CustomEvent.new(timestamp, user, key, data, metric_value) -end - -def make_alias_event(timestamp, key, context_kind, previous_key, previous_context_kind) - LaunchDarkly::Impl::AliasEvent.new(timestamp, key, context_kind, previous_key, previous_context_kind) +def make_custom_event(timestamp, context, key, data = nil, metric_value = nil) + LaunchDarkly::Impl::CustomEvent.new(timestamp, context, key, data, metric_value) end diff --git a/spec/expiring_cache_spec.rb b/spec/expiring_cache_spec.rb index ed021c34..7d757acf 100644 --- a/spec/expiring_cache_spec.rb +++ b/spec/expiring_cache_spec.rb @@ -10,7 +10,7 @@ after(:each) do Timecop.return end - + it "evicts entries based on TTL" do c = subject.new(3, 300) c[:a] = 1 @@ -45,7 +45,7 @@ expect(c[:a]).to be nil expect(c[:b]).to eq 2 - expect(c[:c]).to eq 3 + expect(c[:c]).to eq 3 end it "resets LRU on put" do @@ -57,7 +57,7 @@ expect(c[:a]).to eq 1 expect(c[:b]).to be nil - expect(c[:c]).to eq 3 + expect(c[:c]).to eq 3 end it "resets TTL on put" do diff --git a/spec/feature_store_spec_base.rb b/spec/feature_store_spec_base.rb index 78fc8596..11df5969 100644 --- a/spec/feature_store_spec_base.rb +++ b/spec/feature_store_spec_base.rb @@ -41,7 +41,7 @@ key: $key1, name: "Thing 1", version: 11, - deleted: false + deleted: false, } $unused_key = "no" @@ -111,7 +111,7 @@ def new_version_plus(f, deltaVersion, attrs = {}) key: key2, name: "Thing 2", version: 22, - deleted: false + deleted: false, } with_inited_store([ $thing1, thing2 ]) do |store| expect(store.all($things_kind)).to eq ({ $key1.to_sym => $thing1, key2.to_sym => thing2 }) @@ -124,7 +124,7 @@ def new_version_plus(f, deltaVersion, attrs = {}) key: key2, name: "Thing 2", version: 22, - deleted: true + deleted: true, } with_inited_store([ $thing1, thing2 ]) do |store| expect(store.all($things_kind)).to eq ({ $key1.to_sym => $thing1 }) @@ -188,7 +188,7 @@ def new_version_plus(f, deltaVersion, attrs = {}) key: "my-fancy-flag", name: "Tęst Feåtūre Flæg😺", version: 1, - deleted: false + deleted: false, } with_inited_store([]) do |store| store.upsert(LaunchDarkly::FEATURES, flag) @@ -208,11 +208,11 @@ def new_version_plus(f, deltaVersion, attrs = {}) caching_test_groups = [ ["with caching", { expiration: 60 }], - ["without caching", { expiration: 0 }] + ["without caching", { expiration: 0 }], ] prefix_test_groups = [ ["with default prefix", {}], - ["with specified prefix", { prefix: 
"testprefix" }] + ["with specified prefix", { prefix: "testprefix" }], ] caching_test_groups.each do |test_group_description, caching_options| @@ -226,7 +226,7 @@ def new_version_plus(f, deltaVersion, attrs = {}) store_tester = store_tester_class.new(base_options) before(:each) { store_tester.clear_data } - + include_examples "any_feature_store", store_tester it "can detect if another instance has initialized the store" do diff --git a/spec/flags_state_spec.rb b/spec/flags_state_spec.rb index 323c6c31..006fb88f 100644 --- a/spec/flags_state_spec.rb +++ b/spec/flags_state_spec.rb @@ -31,6 +31,7 @@ it "can be converted to JSON structure" do state = subject.new(true) flag_state1 = { key: "key1", version: 100, trackEvents: false, value: 'value1', variation: 0, reason: LaunchDarkly::EvaluationReason.fallthrough(false) } + # rubocop:disable Layout/LineLength flag_state2 = { key: "key2", version: 200, trackEvents: true, debugEventsUntilDate: 1000, value: 'value2', variation: 1, reason: LaunchDarkly::EvaluationReason.fallthrough(false) } state.add_flag(flag_state1, false, false) state.add_flag(flag_state2, false, false) @@ -42,22 +43,23 @@ '$flagsState' => { 'key1' => { :variation => 0, - :version => 100 + :version => 100, }, 'key2' => { :variation => 1, :version => 200, :trackEvents => true, - :debugEventsUntilDate => 1000 - } + :debugEventsUntilDate => 1000, + }, }, - '$valid' => true + '$valid' => true, }) end it "can be converted to JSON string" do state = subject.new(true) flag_state1 = { key: "key1", version: 100, trackEvents: false, value: 'value1', variation: 0, reason: LaunchDarkly::EvaluationReason.fallthrough(false) } + # rubocop:disable Layout/LineLength flag_state2 = { key: "key2", version: 200, trackEvents: true, debugEventsUntilDate: 1000, value: 'value2', variation: 1, reason: LaunchDarkly::EvaluationReason.fallthrough(false) } state.add_flag(flag_state1, false, false) state.add_flag(flag_state2, false, false) @@ -70,6 +72,7 @@ it "uses our custom serializer with JSON.generate" do state = subject.new(true) flag_state1 = { key: "key1", version: 100, trackEvents: false, value: 'value1', variation: 0, reason: LaunchDarkly::EvaluationReason.fallthrough(false) } + # rubocop:disable Layout/LineLength flag_state2 = { key: "key2", version: 200, trackEvents: true, debugEventsUntilDate: 1000, value: 'value2', variation: 1, reason: LaunchDarkly::EvaluationReason.fallthrough(false) } state.add_flag(flag_state1, false, false) state.add_flag(flag_state2, false, false) diff --git a/spec/http_util.rb b/spec/http_util.rb index 1a789772..32cfd0fe 100644 --- a/spec/http_util.rb +++ b/spec/http_util.rb @@ -15,7 +15,7 @@ def initialize Port: @port, AccessLog: [], Logger: NullLogger.new, - RequestCallback: method(:record_request) + RequestCallback: method(:record_request), } @server = create_server(@port, base_opts) rescue Errno::EADDRINUSE @@ -62,7 +62,7 @@ def setup_status_response(uri_path, status, headers={}) def setup_ok_response(uri_path, body, content_type=nil, headers={}) setup_response(uri_path) do |req, res| res.status = 200 - res.content_type = content_type if !content_type.nil? + res.content_type = content_type unless content_type.nil? 
res.body = body headers.each { |n, v| res[n] = v } end @@ -80,7 +80,7 @@ def await_request def await_request_with_body r = @requests_queue.pop - return r[0], r[1] + [r[0], r[1]] end end @@ -96,11 +96,11 @@ def initialize def create_server(port, base_opts) WEBrick::HTTPProxyServer.new(base_opts.merge({ ProxyContentHandler: proc do |req,res| - if !@connect_status.nil? + unless @connect_status.nil? res.status = @connect_status end @request_count += 1 - end + end, })) end end @@ -127,6 +127,6 @@ def initialize(ports = {}) end def open(uri, timeout) - TCPSocket.new 'localhost', @ports[uri] + TCPSocket.new '127.0.0.1', @ports[uri] end -end \ No newline at end of file +end diff --git a/spec/impl/big_segments_spec.rb b/spec/impl/big_segments_spec.rb index 89637653..920cf941 100644 --- a/spec/impl/big_segments_spec.rb +++ b/spec/impl/big_segments_spec.rb @@ -11,8 +11,8 @@ module Impl describe BigSegmentStoreManager do subject { BigSegmentStoreManager } - let(:user_key) { 'userkey' } - let(:user_hash) { subject.hash_for_user_key(user_key) } + let(:context_key) { 'contextkey' } + let(:context_hash) { subject.hash_for_context_key(context_key) } let(:null_logger) { double.as_null_object } def always_up_to_date @@ -37,12 +37,12 @@ def with_manager(config) expected_membership = { 'key1' => true, 'key2' => true } store = double expect(store).to receive(:get_metadata).at_least(:once).and_return(always_up_to_date) - expect(store).to receive(:get_membership).with(user_hash).and_return(expected_membership) + expect(store).to receive(:get_membership).with(context_hash).and_return(expected_membership) allow(store).to receive(:stop) with_manager(BigSegmentsConfig.new(store: store)) do |m| expected_result = BigSegmentMembershipResult.new(expected_membership, BigSegmentsStatus::HEALTHY) - expect(m.get_user_membership(user_key)).to eq(expected_result) + expect(m.get_context_membership(context_key)).to eq(expected_result) end end @@ -50,28 +50,28 @@ def with_manager(config) expected_membership = { 'key1' => true, 'key2' => true } store = double expect(store).to receive(:get_metadata).at_least(:once).and_return(always_up_to_date) - expect(store).to receive(:get_membership).with(user_hash).once.and_return(expected_membership) + expect(store).to receive(:get_membership).with(context_hash).once.and_return(expected_membership) # the ".once" on this mock expectation is what verifies that the cache is working; there should only be one query allow(store).to receive(:stop) with_manager(BigSegmentsConfig.new(store: store)) do |m| expected_result = BigSegmentMembershipResult.new(expected_membership, BigSegmentsStatus::HEALTHY) - expect(m.get_user_membership(user_key)).to eq(expected_result) - expect(m.get_user_membership(user_key)).to eq(expected_result) + expect(m.get_context_membership(context_key)).to eq(expected_result) + expect(m.get_context_membership(context_key)).to eq(expected_result) end end it "can cache a nil result" do store = double expect(store).to receive(:get_metadata).at_least(:once).and_return(always_up_to_date) - expect(store).to receive(:get_membership).with(user_hash).once.and_return(nil) + expect(store).to receive(:get_membership).with(context_hash).once.and_return(nil) # the ".once" on this mock expectation is what verifies that the cache is working; there should only be one query allow(store).to receive(:stop) with_manager(BigSegmentsConfig.new(store: store)) do |m| expected_result = BigSegmentMembershipResult.new({}, BigSegmentsStatus::HEALTHY) - expect(m.get_user_membership(user_key)).to 
eq(expected_result) - expect(m.get_user_membership(user_key)).to eq(expected_result) + expect(m.get_context_membership(context_key)).to eq(expected_result) + expect(m.get_context_membership(context_key)).to eq(expected_result) end end @@ -79,15 +79,15 @@ def with_manager(config) expected_membership = { 'key1' => true, 'key2' => true } store = double expect(store).to receive(:get_metadata).at_least(:once).and_return(always_up_to_date) - expect(store).to receive(:get_membership).with(user_hash).twice.and_return(expected_membership) + expect(store).to receive(:get_membership).with(context_hash).twice.and_return(expected_membership) # the ".twice" on this mock expectation is what verifies that the cached result expired allow(store).to receive(:stop) - with_manager(BigSegmentsConfig.new(store: store, user_cache_time: 0.01)) do |m| + with_manager(BigSegmentsConfig.new(store: store, context_cache_time: 0.01)) do |m| expected_result = BigSegmentMembershipResult.new(expected_membership, BigSegmentsStatus::HEALTHY) - expect(m.get_user_membership(user_key)).to eq(expected_result) + expect(m.get_context_membership(context_key)).to eq(expected_result) sleep(0.1) - expect(m.get_user_membership(user_key)).to eq(expected_result) + expect(m.get_context_membership(context_key)).to eq(expected_result) end end @@ -95,12 +95,12 @@ def with_manager(config) expected_membership = { 'key1' => true, 'key2' => true } store = double expect(store).to receive(:get_metadata).at_least(:once).and_return(always_stale) - expect(store).to receive(:get_membership).with(user_hash).and_return(expected_membership) + expect(store).to receive(:get_membership).with(context_hash).and_return(expected_membership) allow(store).to receive(:stop) with_manager(BigSegmentsConfig.new(store: store)) do |m| expected_result = BigSegmentMembershipResult.new(expected_membership, BigSegmentsStatus::STALE) - expect(m.get_user_membership(user_key)).to eq(expected_result) + expect(m.get_context_membership(context_key)).to eq(expected_result) end end @@ -108,58 +108,58 @@ def with_manager(config) expected_membership = { 'key1' => true, 'key2' => true } store = double expect(store).to receive(:get_metadata).at_least(:once).and_return(nil) - expect(store).to receive(:get_membership).with(user_hash).and_return(expected_membership) + expect(store).to receive(:get_membership).with(context_hash).and_return(expected_membership) allow(store).to receive(:stop) with_manager(BigSegmentsConfig.new(store: store)) do |m| expected_result = BigSegmentMembershipResult.new(expected_membership, BigSegmentsStatus::STALE) - expect(m.get_user_membership(user_key)).to eq(expected_result) + expect(m.get_context_membership(context_key)).to eq(expected_result) end end - it "least recent user is evicted from cache" do - user_key_1, user_key_2, user_key_3 = 'userkey1', 'userkey2', 'userkey3' - user_hash_1, user_hash_2, user_hash_3 = subject.hash_for_user_key(user_key_1), - subject.hash_for_user_key(user_key_2), subject.hash_for_user_key(user_key_3) + it "least recent context is evicted from cache" do + context_key_1, context_key_2, context_key_3 = 'contextkey1', 'contextkey2', 'contextkey3' + context_hash_1, context_hash_2, context_hash_3 = subject.hash_for_context_key(context_key_1), + subject.hash_for_context_key(context_key_2), subject.hash_for_context_key(context_key_3) memberships = { - user_hash_1 => { 'seg1': true }, - user_hash_2 => { 'seg2': true }, - user_hash_3 => { 'seg3': true } + context_hash_1 => { 'seg1': true }, + context_hash_2 => { 'seg2': true }, + 
context_hash_3 => { 'seg3': true },
       }
 
-      queried_users = []
+      queries_contexts = []
       store = double
       expect(store).to receive(:get_metadata).at_least(:once).and_return(always_up_to_date)
       expect(store).to receive(:get_membership).exactly(4).times do |key|
-        queried_users << key
+        queries_contexts << key
         memberships[key]
       end
       allow(store).to receive(:stop)
 
-      with_manager(BigSegmentsConfig.new(store: store, user_cache_size: 2)) do |m|
-        result1 = m.get_user_membership(user_key_1)
-        result2 = m.get_user_membership(user_key_2)
-        result3 = m.get_user_membership(user_key_3)
-        expect(result1).to eq(BigSegmentMembershipResult.new(memberships[user_hash_1], BigSegmentsStatus::HEALTHY))
-        expect(result2).to eq(BigSegmentMembershipResult.new(memberships[user_hash_2], BigSegmentsStatus::HEALTHY))
-        expect(result3).to eq(BigSegmentMembershipResult.new(memberships[user_hash_3], BigSegmentsStatus::HEALTHY))
-
-        expect(queried_users).to eq([user_hash_1, user_hash_2, user_hash_3])
-
-        # Since the capacity is only 2 and user_key_1 was the least recently used, that key should be
-        # evicted by the user_key_3 query. Now only user_key_2 and user_key_3 are in the cache, and
+      with_manager(BigSegmentsConfig.new(store: store, context_cache_size: 2)) do |m|
+        result1 = m.get_context_membership(context_key_1)
+        result2 = m.get_context_membership(context_key_2)
+        result3 = m.get_context_membership(context_key_3)
+        expect(result1).to eq(BigSegmentMembershipResult.new(memberships[context_hash_1], BigSegmentsStatus::HEALTHY))
+        expect(result2).to eq(BigSegmentMembershipResult.new(memberships[context_hash_2], BigSegmentsStatus::HEALTHY))
+        expect(result3).to eq(BigSegmentMembershipResult.new(memberships[context_hash_3], BigSegmentsStatus::HEALTHY))
+
+        expect(queries_contexts).to eq([context_hash_1, context_hash_2, context_hash_3])
+
+        # Since the capacity is only 2 and context_key_1 was the least recently used, that key should be
+        # evicted by the context_key_3 query. Now only context_key_2 and context_key_3 are in the cache, and
         # querying them again should not cause a new query to the store.
-        result2a = m.get_user_membership(user_key_2)
-        result3a = m.get_user_membership(user_key_3)
+        result2a = m.get_context_membership(context_key_2)
+        result3a = m.get_context_membership(context_key_3)
         expect(result2a).to eq(result2)
         expect(result3a).to eq(result3)
 
-        expect(queried_users).to eq([user_hash_1, user_hash_2, user_hash_3])
+        expect(queries_contexts).to eq([context_hash_1, context_hash_2, context_hash_3])
 
-        result1a = m.get_user_membership(user_key_1)
+        result1a = m.get_context_membership(context_key_1)
         expect(result1a).to eq(result1)
-
-        expect(queried_users).to eq([user_hash_1, user_hash_2, user_hash_3, user_hash_1])
+
+        expect(queries_contexts).to eq([context_hash_1, context_hash_2, context_hash_3, context_hash_1])
       end
     end
   end
diff --git a/spec/impl/context_spec.rb b/spec/impl/context_spec.rb
new file mode 100644
index 00000000..ce9d6ff5
--- /dev/null
+++ b/spec/impl/context_spec.rb
@@ -0,0 +1,31 @@
+require "ldclient-rb/impl/context"
+
+describe LaunchDarkly::Impl::Context do
+  subject { LaunchDarkly::Impl::Context }
+
+  it "can validate kind correctly" do
+    test_cases = [
+      [:user_context, LaunchDarkly::Impl::Context::ERR_KIND_NON_STRING],
+      ["kind", LaunchDarkly::Impl::Context::ERR_KIND_CANNOT_BE_KIND],
+      ["multi", LaunchDarkly::Impl::Context::ERR_KIND_CANNOT_BE_MULTI],
+      ["user@type", LaunchDarkly::Impl::Context::ERR_KIND_INVALID_CHARS],
+      ["org", nil],
+    ]
+
+    test_cases.each do |input, expected|
+      expect(subject.validate_kind(input)).to eq(expected)
+    end
+  end
+
+  it "can validate a key correctly" do
+    test_cases = [
+      [:key, LaunchDarkly::Impl::Context::ERR_KEY_NON_STRING],
+      ["", LaunchDarkly::Impl::Context::ERR_KEY_EMPTY],
+      ["key", nil],
+    ]
+
+    test_cases.each do |input, expected|
+      expect(subject.validate_key(input)).to eq(expected)
+    end
+  end
+end
\ No newline at end of file
diff --git a/spec/impl/evaluator_big_segments_spec.rb b/spec/impl/evaluator_big_segments_spec.rb
index 36767567..3809f61f 100644
--- a/spec/impl/evaluator_big_segments_spec.rb
+++ b/spec/impl/evaluator_big_segments_spec.rb
@@ -5,154 +5,154 @@ module LaunchDarkly
   module Impl
-    evaluator_tests_with_and_without_preprocessing "Evaluator (big segments)" do |desc, factory|
-      describe "#{desc} - evaluate", :evaluator_spec_base => true do
+    describe "Evaluator (big segments)" do
+      describe "evaluate", :evaluator_spec_base => true do
         it "segment is not matched if there is no way to query it" do
-          segment = factory.segment({
+          segment = Segments.from_hash({
             key: 'test',
-            included: [ user[:key] ], # included should be ignored for a big segment
+            included: [user_context.key ], # included should be ignored for a big segment
             version: 1,
             unbounded: true,
-            generation: 1
+            generation: 1,
           })
-          e = EvaluatorBuilder.new(logger).
-            with_segment(segment).
-            build
-          flag = factory.boolean_flag_with_clauses([make_segment_match_clause(segment)])
-          result = e.evaluate(flag, user)
+          e = EvaluatorBuilder.new(logger)
+            .with_segment(segment)
+            .build
+          flag = Flags.boolean_flag_with_clauses(Clauses.match_segment(segment))
+          result = e.evaluate(flag, user_context)
           expect(result.detail.value).to be false
           expect(result.detail.reason.big_segments_status).to be(BigSegmentsStatus::NOT_CONFIGURED)
         end
 
         it "segment with no generation is not matched" do
-          segment = factory.segment({
+          segment = Segments.from_hash({
             key: 'test',
-            included: [ user[:key] ], # included should be ignored for a big segment
+            included: [user_context.key ], # included should be ignored for a big segment
             version: 1,
-            unbounded: true
+            unbounded: true,
           })
-          e = EvaluatorBuilder.new(logger).
-            with_segment(segment).
-            build
-          flag = factory.boolean_flag_with_clauses([make_segment_match_clause(segment)])
-          result = e.evaluate(flag, user)
+          e = EvaluatorBuilder.new(logger)
+            .with_segment(segment)
+            .build
+          flag = Flags.boolean_flag_with_clauses(Clauses.match_segment(segment))
+          result = e.evaluate(flag, user_context)
           expect(result.detail.value).to be false
           expect(result.detail.reason.big_segments_status).to be(BigSegmentsStatus::NOT_CONFIGURED)
         end
 
         it "matched with include" do
-          segment = factory.segment({
+          segment = Segments.from_hash({
            key: 'test',
            version: 1,
            unbounded: true,
-            generation: 2
+            generation: 2,
           })
-          e = EvaluatorBuilder.new(logger).
-            with_segment(segment).
-            with_big_segment_for_user(user, segment, true).
-            build
-          flag = factory.boolean_flag_with_clauses([make_segment_match_clause(segment)])
-          result = e.evaluate(flag, user)
+          e = EvaluatorBuilder.new(logger)
+            .with_segment(segment)
+            .with_big_segment_for_context(user_context, segment, true)
+            .build
+          flag = Flags.boolean_flag_with_clauses(Clauses.match_segment(segment))
+          result = e.evaluate(flag, user_context)
           expect(result.detail.value).to be true
           expect(result.detail.reason.big_segments_status).to be(BigSegmentsStatus::HEALTHY)
         end
 
         it "matched with rule" do
-          segment = factory.segment({
+          segment = Segments.from_hash({
            key: 'test',
            version: 1,
            unbounded: true,
            generation: 2,
            rules: [
-              { clauses: [ make_user_matching_clause(user) ] }
-            ]
+              { clauses: [ Clauses.match_context(user_context) ] },
+            ],
           })
-          e = EvaluatorBuilder.new(logger).
-            with_segment(segment).
-            with_big_segment_for_user(user, segment, nil).
-            build
-          flag = factory.boolean_flag_with_clauses([make_segment_match_clause(segment)])
-          result = e.evaluate(flag, user)
+          e = EvaluatorBuilder.new(logger)
+            .with_segment(segment)
+            .with_big_segment_for_context(user_context, segment, nil)
+            .build
+          flag = Flags.boolean_flag_with_clauses(Clauses.match_segment(segment))
+          result = e.evaluate(flag, user_context)
           expect(result.detail.value).to be true
           expect(result.detail.reason.big_segments_status).to be(BigSegmentsStatus::HEALTHY)
         end
 
         it "unmatched by exclude regardless of rule" do
-          segment = factory.segment({
+          segment = Segments.from_hash({
            key: 'test',
            version: 1,
            unbounded: true,
            generation: 2,
            rules: [
-              { clauses: [ make_user_matching_clause(user) ] }
-            ]
+              { clauses: [ Clauses.match_context(user_context) ] },
+            ],
           })
-          e = EvaluatorBuilder.new(logger).
-            with_segment(segment).
-            with_big_segment_for_user(user, segment, false).
-            build
-          flag = factory.boolean_flag_with_clauses([make_segment_match_clause(segment)])
-          result = e.evaluate(flag, user)
+          e = EvaluatorBuilder.new(logger)
+            .with_segment(segment)
+            .with_big_segment_for_context(user_context, segment, false)
+            .build
+          flag = Flags.boolean_flag_with_clauses(Clauses.match_segment(segment))
+          result = e.evaluate(flag, user_context)
           expect(result.detail.value).to be false
           expect(result.detail.reason.big_segments_status).to be(BigSegmentsStatus::HEALTHY)
         end
 
         it "status is returned from provider" do
-          segment = factory.segment({
+          segment = Segments.from_hash({
            key: 'test',
            version: 1,
            unbounded: true,
-            generation: 2
+            generation: 2,
           })
-          e = EvaluatorBuilder.new(logger).
-            with_segment(segment).
-            with_big_segment_for_user(user, segment, true).
-            with_big_segments_status(BigSegmentsStatus::STALE).
-            build
-          flag = factory.boolean_flag_with_clauses([make_segment_match_clause(segment)])
-          result = e.evaluate(flag, user)
+          e = EvaluatorBuilder.new(logger)
+            .with_segment(segment)
+            .with_big_segment_for_context(user_context, segment, true)
+            .with_big_segments_status(BigSegmentsStatus::STALE)
+            .build
+          flag = Flags.boolean_flag_with_clauses(Clauses.match_segment(segment))
+          result = e.evaluate(flag, user_context)
           expect(result.detail.value).to be true
           expect(result.detail.reason.big_segments_status).to be(BigSegmentsStatus::STALE)
         end
 
         it "queries state only once per user even if flag references multiple segments" do
-          segment1 = factory.segment({
+          segment1 = Segments.from_hash({
            key: 'segmentkey1',
            version: 1,
            unbounded: true,
-            generation: 2
+            generation: 2,
           })
-          segment2 = factory.segment({
+          segment2 = Segments.from_hash({
            key: 'segmentkey2',
            version: 1,
            unbounded: true,
-            generation: 3
+            generation: 3,
           })
-          flag = factory.flag({
+          flag = Flags.from_hash({
            key: 'key',
            on: true,
            fallthrough: { variation: 0 },
            variations: [ false, true ],
            rules: [
-              { variation: 1, clauses: [ make_segment_match_clause(segment1) ]},
-              { variation: 1, clauses: [ make_segment_match_clause(segment2) ]}
-            ]
+              { variation: 1, clauses: [ Clauses.match_segment(segment1) ]},
+              { variation: 1, clauses: [ Clauses.match_segment(segment2) ]},
+            ],
           })
-
+
           queries = []
-          e = EvaluatorBuilder.new(logger).
-            with_segment(segment1).with_segment(segment2).
-            with_big_segment_for_user(user, segment2, true).
-            record_big_segments_queries(queries).
-            build
+          e = EvaluatorBuilder.new(logger)
+            .with_segment(segment1).with_segment(segment2)
+            .with_big_segment_for_context(user_context, segment2, true)
+            .record_big_segments_queries(queries)
+            .build
           # The membership deliberately does not include segment1, because we want the first rule to be
           # a non-match so that it will continue on and check segment2 as well.
- - result = e.evaluate(flag, user) + + result = e.evaluate(flag, user_context) expect(result.detail.value).to be true expect(result.detail.reason.big_segments_status).to be(BigSegmentsStatus::HEALTHY) - expect(queries).to eq([ user[:key] ]) + expect(queries).to eq([user_context.key ]) end end end diff --git a/spec/impl/evaluator_bucketing_spec.rb b/spec/impl/evaluator_bucketing_spec.rb index 98dbd13d..005dd888 100644 --- a/spec/impl/evaluator_bucketing_spec.rb +++ b/spec/impl/evaluator_bucketing_spec.rb @@ -1,153 +1,161 @@ +require "model_builders" require "spec_helper" describe LaunchDarkly::Impl::EvaluatorBucketing do subject { LaunchDarkly::Impl::EvaluatorBucketing } - describe "bucket_user" do + describe "bucket_context" do describe "seed exists" do let(:seed) { 61 } it "returns the expected bucket values for seed" do - user = { key: "userKeyA" } - bucket = subject.bucket_user(user, "hashKey", "key", "saltyA", seed) - expect(bucket).to be_within(0.0000001).of(0.09801207); + context = LaunchDarkly::LDContext.create({ key: "userKeyA" }) + bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) + expect(bucket).to be_within(0.0000001).of(0.09801207) - user = { key: "userKeyB" } - bucket = subject.bucket_user(user, "hashKey", "key", "saltyA", seed) - expect(bucket).to be_within(0.0000001).of(0.14483777); + context = LaunchDarkly::LDContext.create({ key: "userKeyB" }) + bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) + expect(bucket).to be_within(0.0000001).of(0.14483777) - user = { key: "userKeyC" } - bucket = subject.bucket_user(user, "hashKey", "key", "saltyA", seed) - expect(bucket).to be_within(0.0000001).of(0.9242641); + context = LaunchDarkly::LDContext.create({ key: "userKeyC" }) + bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) + expect(bucket).to be_within(0.0000001).of(0.9242641) end it "returns the same bucket regardless of hashKey and salt" do - user = { key: "userKeyA" } - bucket1 = subject.bucket_user(user, "hashKey", "key", "saltyA", seed) - bucket2 = subject.bucket_user(user, "hashKey1", "key", "saltyB", seed) - bucket3 = subject.bucket_user(user, "hashKey2", "key", "saltyC", seed) + context = LaunchDarkly::LDContext.create({ key: "userKeyA" }) + bucket1 = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) + bucket2 = subject.bucket_context(context, context.kind, "hashKey1", "key", "saltyB", seed) + bucket3 = subject.bucket_context(context, context.kind, "hashKey2", "key", "saltyC", seed) expect(bucket1).to eq(bucket2) expect(bucket2).to eq(bucket3) end it "returns a different bucket if the seed is not the same" do - user = { key: "userKeyA" } - bucket1 = subject.bucket_user(user, "hashKey", "key", "saltyA", seed) - bucket2 = subject.bucket_user(user, "hashKey1", "key", "saltyB", seed+1) + context = LaunchDarkly::LDContext.create({ key: "userKeyA" }) + bucket1 = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", seed) + bucket2 = subject.bucket_context(context, context.kind, "hashKey1", "key", "saltyB", seed+1) expect(bucket1).to_not eq(bucket2) end - it "returns a different bucket if the user is not the same" do - user1 = { key: "userKeyA" } - user2 = { key: "userKeyB" } - bucket1 = subject.bucket_user(user1, "hashKey", "key", "saltyA", seed) - bucket2 = subject.bucket_user(user2, "hashKey1", "key", "saltyB", seed) + it "returns a different bucket if the context is not the same" do + context1 = 
LaunchDarkly::LDContext.create({ key: "userKeyA" }) + context2 = LaunchDarkly::LDContext.create({ key: "userKeyB" }) + bucket1 = subject.bucket_context(context1, context1.kind, "hashKey", "key", "saltyA", seed) + bucket2 = subject.bucket_context(context2, context2.kind, "hashKey1", "key", "saltyB", seed) expect(bucket1).to_not eq(bucket2) end end it "gets expected bucket values for specific keys" do - user = { key: "userKeyA" } - bucket = subject.bucket_user(user, "hashKey", "key", "saltyA", nil) - expect(bucket).to be_within(0.0000001).of(0.42157587); + context = LaunchDarkly::LDContext.create({ key: "userKeyA" }) + bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", nil) + expect(bucket).to be_within(0.0000001).of(0.42157587) - user = { key: "userKeyB" } - bucket = subject.bucket_user(user, "hashKey", "key", "saltyA", nil) - expect(bucket).to be_within(0.0000001).of(0.6708485); + context = LaunchDarkly::LDContext.create({ key: "userKeyB" }) + bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", nil) + expect(bucket).to be_within(0.0000001).of(0.6708485) - user = { key: "userKeyC" } - bucket = subject.bucket_user(user, "hashKey", "key", "saltyA", nil) - expect(bucket).to be_within(0.0000001).of(0.10343106); + context = LaunchDarkly::LDContext.create({ key: "userKeyC" }) + bucket = subject.bucket_context(context, context.kind, "hashKey", "key", "saltyA", nil) + expect(bucket).to be_within(0.0000001).of(0.10343106) + end + + it "treats the bucket by attribute as a reference when a context kind isn't specified" do + context = LaunchDarkly::LDContext.create({ key: "userKeyB", kind: "user", address: { street: "123 Easy St", city: "Anytown" } }) + bucket = subject.bucket_context(context, context.kind, "hashKey", "/address/street", "saltyA", nil) + expect(bucket).to be_within(0.0000001).of(0.56809287) + + bucket = subject.bucket_context(context, nil, "hashKey", "/address/street", "saltyA", nil) + expect(bucket).to be_within(0.0000001).of(0) end it "can bucket by int value (equivalent to string)" do - user = { + context = LaunchDarkly::LDContext.create({ key: "userkey", custom: { stringAttr: "33333", - intAttr: 33333 - } - } - stringResult = subject.bucket_user(user, "hashKey", "stringAttr", "saltyA", nil) - intResult = subject.bucket_user(user, "hashKey", "intAttr", "saltyA", nil) + intAttr: 33333, + }, + }) + stringResult = subject.bucket_context(context, context.kind, "hashKey", "stringAttr", "saltyA", nil) + intResult = subject.bucket_context(context, context.kind, "hashKey", "intAttr", "saltyA", nil) expect(intResult).to be_within(0.0000001).of(0.54771423) expect(intResult).to eq(stringResult) end it "cannot bucket by float value" do - user = { + context = LaunchDarkly::LDContext.create({ key: "userkey", custom: { - floatAttr: 33.5 - } - } - result = subject.bucket_user(user, "hashKey", "floatAttr", "saltyA", nil) + floatAttr: 33.5, + }, + }) + result = subject.bucket_context(context, context.kind, "hashKey", "floatAttr", "saltyA", nil) expect(result).to eq(0.0) end it "cannot bucket by bool value" do - user = { + context = LaunchDarkly::LDContext.create({ key: "userkey", custom: { - boolAttr: true - } - } - result = subject.bucket_user(user, "hashKey", "boolAttr", "saltyA", nil) + boolAttr: true, + }, + }) + result = subject.bucket_context(context, context.kind, "hashKey", "boolAttr", "saltyA", nil) expect(result).to eq(0.0) end end - describe "variation_index_for_user" do + describe "variation_index_for_context" do context 
"rollout is not an experiment" do it "matches bucket" do - user = { key: "userkey" } + context = LaunchDarkly::LDContext.create({ key: "userkey" }) flag_key = "flagkey" salt = "salt" # First verify that with our test inputs, the bucket value will be greater than zero and less than 100000, # so we can construct a rollout whose second bucket just barely contains that value - bucket_value = (subject.bucket_user(user, flag_key, "key", salt, nil) * 100000).truncate() + bucket_value = (subject.bucket_context(context, context.kind, flag_key, "key", salt, nil) * 100000).truncate() expect(bucket_value).to be > 0 expect(bucket_value).to be < 100000 bad_variation_a = 0 matched_variation = 1 bad_variation_b = 2 - rule = { - rollout: { + vr = LaunchDarkly::Impl::Model::VariationOrRollout.new(nil, + { variations: [ { variation: bad_variation_a, weight: bucket_value }, # end of bucket range is not inclusive, so it will *not* match the target value { variation: matched_variation, weight: 1 }, # size of this bucket is 1, so it only matches that specific value - { variation: bad_variation_b, weight: 100000 - (bucket_value + 1) } - ] - } - } - flag = { key: flag_key, salt: salt } + { variation: bad_variation_b, weight: 100000 - (bucket_value + 1) }, + ], + }) + flag = Flags.from_hash({ key: flag_key, salt: salt }) - result_variation, inExperiment = subject.variation_index_for_user(flag, rule, user) + result_variation, inExperiment = subject.variation_index_for_context(flag, vr, context) expect(result_variation).to be matched_variation expect(inExperiment).to be(false) end it "uses last bucket if bucket value is equal to total weight" do - user = { key: "userkey" } + context = LaunchDarkly::LDContext.create({ key: "userkey" }) flag_key = "flagkey" salt = "salt" - bucket_value = (subject.bucket_user(user, flag_key, "key", salt, nil) * 100000).truncate() + bucket_value = (subject.bucket_context(context, context.kind, flag_key, "key", salt, nil) * 100000).truncate() # We'll construct a list of variations that stops right at the target bucket value - rule = { - rollout: { + vr = LaunchDarkly::Impl::Model::VariationOrRollout.new(nil, + { variations: [ - { variation: 0, weight: bucket_value } - ] - } - } - flag = { key: flag_key, salt: salt } + { variation: 0, weight: bucket_value }, + ], + }) + flag = Flags.from_hash({ key: flag_key, salt: salt }) - result_variation, inExperiment = subject.variation_index_for_user(flag, rule, user) + result_variation, inExperiment = subject.variation_index_for_context(flag, vr, context) expect(result_variation).to be 0 expect(inExperiment).to be(false) end @@ -155,60 +163,57 @@ end context "rollout is an experiment" do - it "returns whether user is in the experiment or not" do - user1 = { key: "userKeyA" } - user2 = { key: "userKeyB" } - user3 = { key: "userKeyC" } + it "returns whether context is in the experiment or not" do + context1 = LaunchDarkly::LDContext.create({ key: "userKeyA" }) + context2 = LaunchDarkly::LDContext.create({ key: "userKeyB" }) + context3 = LaunchDarkly::LDContext.create({ key: "userKeyC" }) flag_key = "flagkey" salt = "salt" seed = 61 - - rule = { - rollout: { + vr = LaunchDarkly::Impl::Model::VariationOrRollout.new(nil, + { seed: seed, kind: 'experiment', variations: [ { variation: 0, weight: 10000, untracked: false }, { variation: 2, weight: 20000, untracked: false }, - { variation: 0, weight: 70000 , untracked: true } - ] - } - } - flag = { key: flag_key, salt: salt } + { variation: 0, weight: 70000 , untracked: true }, + ], + }) + flag = 
Flags.from_hash({ key: flag_key, salt: salt }) - result_variation, inExperiment = subject.variation_index_for_user(flag, rule, user1) + result_variation, inExperiment = subject.variation_index_for_context(flag, vr, context1) expect(result_variation).to be(0) expect(inExperiment).to be(true) - result_variation, inExperiment = subject.variation_index_for_user(flag, rule, user2) + result_variation, inExperiment = subject.variation_index_for_context(flag, vr, context2) expect(result_variation).to be(2) expect(inExperiment).to be(true) - result_variation, inExperiment = subject.variation_index_for_user(flag, rule, user3) + result_variation, inExperiment = subject.variation_index_for_context(flag, vr, context3) expect(result_variation).to be(0) expect(inExperiment).to be(false) end it "uses last bucket if bucket value is equal to total weight" do - user = { key: "userkey" } + context = LaunchDarkly::LDContext.create({ key: "userkey" }) flag_key = "flagkey" salt = "salt" seed = 61 - bucket_value = (subject.bucket_user(user, flag_key, "key", salt, seed) * 100000).truncate() + bucket_value = (subject.bucket_context(context, context.kind, flag_key, "key", salt, seed) * 100000).truncate() # We'll construct a list of variations that stops right at the target bucket value - rule = { - rollout: { + vr = LaunchDarkly::Impl::Model::VariationOrRollout.new(nil, + { seed: seed, kind: 'experiment', variations: [ - { variation: 0, weight: bucket_value, untracked: false } - ] - } - } - flag = { key: flag_key, salt: salt } + { variation: 0, weight: bucket_value, untracked: false }, + ], + }) + flag = Flags.from_hash({ key: flag_key, salt: salt }) - result_variation, inExperiment = subject.variation_index_for_user(flag, rule, user) + result_variation, inExperiment = subject.variation_index_for_context(flag, vr, context) expect(result_variation).to be 0 expect(inExperiment).to be(true) end diff --git a/spec/impl/evaluator_clause_spec.rb b/spec/impl/evaluator_clause_spec.rb index facf68de..e6245c7e 100644 --- a/spec/impl/evaluator_clause_spec.rb +++ b/spec/impl/evaluator_clause_spec.rb @@ -3,51 +3,84 @@ module LaunchDarkly module Impl - evaluator_tests_with_and_without_preprocessing "Evaluator (clauses)" do |desc, factory| - describe "#{desc} - evaluate", :evaluator_spec_base => true do + describe "Evaluator (clauses)" do + describe "evaluate", :evaluator_spec_base => true do it "can match built-in attribute" do - user = { key: 'x', name: 'Bob' } + context = LDContext.create({ key: 'x', name: 'Bob' }) clause = { attribute: 'name', op: 'in', values: ['Bob'] } - flag = factory.boolean_flag_with_clauses([clause]) - expect(basic_evaluator.evaluate(flag, user).detail.value).to be true + flag = Flags.boolean_flag_with_clauses(clause) + expect(basic_evaluator.evaluate(flag, context).detail.value).to be true end it "can match custom attribute" do - user = { key: 'x', name: 'Bob', custom: { legs: 4 } } + context = LDContext.create({ key: 'x', name: 'Bob', custom: { legs: 4 } }) clause = { attribute: 'legs', op: 'in', values: [4] } - flag = factory.boolean_flag_with_clauses([clause]) - expect(basic_evaluator.evaluate(flag, user).detail.value).to be true + flag = Flags.boolean_flag_with_clauses(clause) + expect(basic_evaluator.evaluate(flag, context).detail.value).to be true end it "returns false for missing attribute" do - user = { key: 'x', name: 'Bob' } + context = LDContext.create({ key: 'x', name: 'Bob' }) clause = { attribute: 'legs', op: 'in', values: [4] } - flag = factory.boolean_flag_with_clauses([clause]) - 
expect(basic_evaluator.evaluate(flag, user).detail.value).to be false + flag = Flags.boolean_flag_with_clauses(clause) + expect(basic_evaluator.evaluate(flag, context).detail.value).to be false end it "returns false for unknown operator" do - user = { key: 'x', name: 'Bob' } + context = LDContext.create({ key: 'x', name: 'Bob' }) clause = { attribute: 'name', op: 'unknown', values: [4] } - flag = factory.boolean_flag_with_clauses([clause]) - expect(basic_evaluator.evaluate(flag, user).detail.value).to be false + flag = Flags.boolean_flag_with_clauses(clause) + expect(basic_evaluator.evaluate(flag, context).detail.value).to be false end it "does not stop evaluating rules after clause with unknown operator" do - user = { key: 'x', name: 'Bob' } + context = LDContext.create({ key: 'x', name: 'Bob' }) clause0 = { attribute: 'name', op: 'unknown', values: [4] } rule0 = { clauses: [ clause0 ], variation: 1 } clause1 = { attribute: 'name', op: 'in', values: ['Bob'] } rule1 = { clauses: [ clause1 ], variation: 1 } - flag = factory.boolean_flag_with_rules([rule0, rule1]) - expect(basic_evaluator.evaluate(flag, user).detail.value).to be true + flag = Flags.boolean_flag_with_rules(rule0, rule1) + expect(basic_evaluator.evaluate(flag, context).detail.value).to be true end it "can be negated" do - user = { key: 'x', name: 'Bob' } + context = LDContext.create({ key: 'x', name: 'Bob' }) clause = { attribute: 'name', op: 'in', values: ['Bob'], negate: true } - flag = factory.boolean_flag_with_clauses([clause]) - expect(basic_evaluator.evaluate(flag, user).detail.value).to be false + flag = Flags.boolean_flag_with_clauses(clause) + expect(basic_evaluator.evaluate(flag, context).detail.value).to be false + end + + it "clause match uses context kind" do + clause = { contextKind: 'company', attribute: 'name', op: 'in', values: ['Catco'] } + + context1 = LDContext.create({ key: 'cc', kind: 'company', name: 'Catco'}) + context2 = LDContext.create({ key: 'l', kind: 'user', name: 'Lucy' }) + context3 = LDContext.create_multi([context1, context2]) + + flag = Flags.boolean_flag_with_clauses(clause) + + expect(basic_evaluator.evaluate(flag, context1).detail.value).to be true + expect(basic_evaluator.evaluate(flag, context2).detail.value).to be false + expect(basic_evaluator.evaluate(flag, context3).detail.value).to be true + end + + it "clause match by kind attribute" do + clause = { attribute: 'kind', op: 'startsWith', values: ['a'] } + + context1 = LDContext.create({ key: 'key' }) + context2 = LDContext.create({ key: 'key', kind: 'ab' }) + context3 = LDContext.create_multi( + [ + LDContext.create({ key: 'key', kind: 'cd' }), + LDContext.create({ key: 'key', kind: 'ab' }), + ] + ) + + flag = Flags.boolean_flag_with_clauses(clause) + + expect(basic_evaluator.evaluate(flag, context1).detail.value).to be false + expect(basic_evaluator.evaluate(flag, context2).detail.value).to be true + expect(basic_evaluator.evaluate(flag, context3).detail.value).to be true end end end diff --git a/spec/impl/evaluator_operators_spec.rb b/spec/impl/evaluator_operators_spec.rb index 5c447e6f..a38cb3f4 100644 --- a/spec/impl/evaluator_operators_spec.rb +++ b/spec/impl/evaluator_operators_spec.rb @@ -40,16 +40,16 @@ [ :contains, "y", "xyz", false ], # mixed strings and numbers - [ :in, "99", 99, false ], + [ :in, "99", 99, false ], [ :in, 99, "99", false ], - [ :contains, "99", 99, false ], - [ :startsWith, "99", 99, false ], - [ :endsWith, "99", 99, false ], - [ :lessThanOrEqual, "99", 99, false ], - [ :lessThanOrEqual, 99, "99", false 
], - [ :greaterThanOrEqual, "99", 99, false ], - [ :greaterThanOrEqual, 99, "99", false ], - + [ :contains, "99", 99, false ], + [ :startsWith, "99", 99, false ], + [ :endsWith, "99", 99, false ], + [ :lessThanOrEqual, "99", 99, false ], + [ :lessThanOrEqual, 99, "99", false ], + [ :greaterThanOrEqual, "99", 99, false ], + [ :greaterThanOrEqual, 99, "99", false ], + # regex [ :matches, "hello world", "hello.*rld", true ], [ :matches, "hello world", "hello.*orl", true ], @@ -90,7 +90,7 @@ [ :semVerGreaterThan, "2.0", "2.0.1", false ], [ :semVerGreaterThan, "2.0.0-rc.1", "2.0.0-rc.0", true ], [ :semVerLessThan, "2.0.1", "xbad%ver", false ], - [ :semVerGreaterThan, "2.0.1", "xbad%ver", false ] + [ :semVerGreaterThan, "2.0.1", "xbad%ver", false ], ] operatorTests.each do |params| @@ -103,39 +103,4 @@ end end end - - describe "user_value" do - [:key, :secondary, :ip, :country, :email, :firstName, :lastName, :avatar, :name, :anonymous, :some_custom_attr].each do |attr| - it "returns nil if property #{attr} is not defined" do - expect(subject::user_value({}, attr)).to be nil - end - end - - [:key, :secondary, :ip, :country, :email, :firstName, :lastName, :avatar, :name].each do |attr| - it "gets string value of string property #{attr}" do - expect(subject::user_value({ attr => 'x' }, attr)).to eq 'x' - end - - it "coerces non-string value of property #{attr} to string" do - expect(subject::user_value({ attr => 3 }, attr)).to eq '3' - end - end - - it "gets boolean value of property anonymous" do - expect(subject::user_value({ anonymous: true }, :anonymous)).to be true - expect(subject::user_value({ anonymous: false }, :anonymous)).to be false - end - - it "does not coerces non-boolean value of property anonymous" do - expect(subject::user_value({ anonymous: 3 }, :anonymous)).to eq 3 - end - - it "gets string value of custom property" do - expect(subject::user_value({ custom: { some_custom_attr: 'x' } }, :some_custom_attr)).to eq 'x' - end - - it "gets non-string value of custom property" do - expect(subject::user_value({ custom: { some_custom_attr: 3 } }, :some_custom_attr)).to eq 3 - end - end end diff --git a/spec/impl/evaluator_prereq_spec.rb b/spec/impl/evaluator_prereq_spec.rb new file mode 100644 index 00000000..3440f916 --- /dev/null +++ b/spec/impl/evaluator_prereq_spec.rb @@ -0,0 +1,202 @@ +require "spec_helper" +require "impl/evaluator_spec_base" + +module LaunchDarkly + module Impl + describe "evaluate", :evaluator_spec_base => true do + it "returns off variation if prerequisite is not found" do + flag = Flags.from_hash( + { + key: 'feature0', + on: true, + prerequisites: [{ key: 'badfeature', variation: 1 }], + fallthrough: { variation: 0 }, + offVariation: 1, + variations: %w[a b c], + } + ) + context = LDContext.create({ key: 'x' }) + detail = EvaluationDetail.new('b', 1, EvaluationReason::prerequisite_failed('badfeature')) + e = EvaluatorBuilder.new(logger).with_unknown_flag('badfeature').build + result = e.evaluate(flag, context) + expect(result.detail).to eq(detail) + expect(result.prereq_evals).to eq(nil) + end + + it "reuses prerequisite-failed result detail instances" do + flag = Flags.from_hash( + { + key: 'feature0', + on: true, + prerequisites: [{ key: 'badfeature', variation: 1 }], + fallthrough: { variation: 0 }, + offVariation: 1, + variations: %w[a b c], + } + ) + context = LDContext.create({ key: 'x' }) + e = EvaluatorBuilder.new(logger).with_unknown_flag('badfeature').build + result1 = e.evaluate(flag, context) + expect(result1.detail.reason).to eq 
EvaluationReason::prerequisite_failed('badfeature') + result2 = e.evaluate(flag, context) + expect(result2.detail).to be result1.detail + end + + it "returns off variation and event if prerequisite of a prerequisite is not found" do + flag = Flags.from_hash( + { + key: 'feature0', + on: true, + prerequisites: [{ key: 'feature1', variation: 1 }], + fallthrough: { variation: 0 }, + offVariation: 1, + variations: %w[a b c], + version: 1, + } + ) + flag1 = Flags.from_hash( + { + key: 'feature1', + on: true, + prerequisites: [{ key: 'feature2', variation: 1 }], # feature2 doesn't exist + fallthrough: { variation: 0 }, + variations: %w[d e], + version: 2, + } + ) + context = LDContext.create({ key: 'x' }) + detail = EvaluationDetail.new('b', 1, EvaluationReason::prerequisite_failed('feature1')) + expected_prereqs = [ + PrerequisiteEvalRecord.new(flag1, flag, EvaluationDetail.new(nil, nil, EvaluationReason::prerequisite_failed('feature2'))), + ] + e = EvaluatorBuilder.new(logger).with_flag(flag1).with_unknown_flag('feature2').build + result = e.evaluate(flag, context) + expect(result.detail).to eq(detail) + expect(result.prereq_evals).to eq(expected_prereqs) + end + + it "returns off variation and event if prerequisite is off" do + flag = Flags.from_hash( + { + key: 'feature0', + on: true, + prerequisites: [{ key: 'feature1', variation: 1 }], + fallthrough: { variation: 0 }, + offVariation: 1, + variations: %w[a b c], + version: 1, + } + ) + flag1 = Flags.from_hash( + { + key: 'feature1', + on: false, + # note that even though it returns the desired variation, it is still off and therefore not a match + offVariation: 1, + fallthrough: { variation: 0 }, + variations: %w[d e], + version: 2, + } + ) + context = LDContext.create({ key: 'x' }) + detail = EvaluationDetail.new('b', 1, EvaluationReason::prerequisite_failed('feature1')) + expected_prereqs = [ + PrerequisiteEvalRecord.new(flag1, flag, EvaluationDetail.new('e', 1, EvaluationReason::off)), + ] + e = EvaluatorBuilder.new(logger).with_flag(flag1).build + result = e.evaluate(flag, context) + expect(result.detail).to eq(detail) + expect(result.prereq_evals).to eq(expected_prereqs) + end + + it "returns off variation and event if prerequisite is not met" do + flag = Flags.from_hash( + { + key: 'feature0', + on: true, + prerequisites: [{ key: 'feature1', variation: 1 }], + fallthrough: { variation: 0 }, + offVariation: 1, + variations: %w[a b c], + version: 1, + } + ) + flag1 = Flags.from_hash( + { + key: 'feature1', + on: true, + fallthrough: { variation: 0 }, + variations: %w[d e], + version: 2, + } + ) + context = LDContext.create({ key: 'x' }) + detail = EvaluationDetail.new('b', 1, EvaluationReason::prerequisite_failed('feature1')) + expected_prereqs = [ + PrerequisiteEvalRecord.new(flag1, flag, EvaluationDetail.new('d', 0, EvaluationReason::fallthrough)), + ] + e = EvaluatorBuilder.new(logger).with_flag(flag1).build + result = e.evaluate(flag, context) + expect(result.detail).to eq(detail) + expect(result.prereq_evals).to eq(expected_prereqs) + end + + it "returns fallthrough variation and event if prerequisite is met and there are no rules" do + flag = Flags.from_hash( + { + key: 'feature0', + on: true, + prerequisites: [{ key: 'feature1', variation: 1 }], + fallthrough: { variation: 0 }, + offVariation: 1, + variations: %w[a b c], + version: 1, + } + ) + flag1 = Flags.from_hash( + { + key: 'feature1', + on: true, + fallthrough: { variation: 1 }, + variations: %w[d e], + version: 2, + } + ) + context = LDContext.create({ key: 'x' }) + 
detail = EvaluationDetail.new('a', 0, EvaluationReason::fallthrough) + expected_prereqs = [ + PrerequisiteEvalRecord.new(flag1, flag, EvaluationDetail.new('e', 1, EvaluationReason::fallthrough)), + ] + e = EvaluatorBuilder.new(logger).with_flag(flag1).build + result = e.evaluate(flag, context) + expect(result.detail).to eq(detail) + expect(result.prereq_evals).to eq(expected_prereqs) + end + + (1..4).each do |depth| + it "correctly detects cycles are at a depth of #{depth}" do + flags = [] + (0...depth).each do |i| + flags << Flags.from_hash( + { + key: "flagkey#{i}", + on: true, + offVariation: 0, + prerequisites: [{ key: "flagkey#{(i + 1) % depth}", variation: 0 }], + variations: [false, true], + } + ) + end + + builder = EvaluatorBuilder.new(logger) + flags.each { |flag| builder.with_flag(flag) } + + evaluator = builder.build + result = evaluator.evaluate(flags[0], LDContext.with_key('user')) + reason = EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG) + expect(result.detail.reason).to eq(reason) + end + end + end + end +end diff --git a/spec/impl/evaluator_rule_spec.rb b/spec/impl/evaluator_rule_spec.rb index 68e724cd..11c07ecb 100644 --- a/spec/impl/evaluator_rule_spec.rb +++ b/spec/impl/evaluator_rule_spec.rb @@ -3,60 +3,58 @@ module LaunchDarkly module Impl - evaluator_tests_with_and_without_preprocessing "Evaluator (rules)" do |desc, factory| - describe "#{desc} - evaluate", :evaluator_spec_base => true do - it "matches user from rules" do + describe "Evaluator (rules)" do + describe "evaluate", :evaluator_spec_base => true do + it "matches context from rules" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], variation: 1 } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: 'userkey' } + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new(true, 1, EvaluationReason::rule_match(0, 'ruleid')) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end - if factory.with_preprocessing - it "reuses rule match result detail instances" do - rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], variation: 1 } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: 'userkey' } - detail = EvaluationDetail.new(true, 1, EvaluationReason::rule_match(0, 'ruleid')) - result1 = basic_evaluator.evaluate(flag, user) - result2 = basic_evaluator.evaluate(flag, user) - expect(result1.detail.reason.rule_id).to eq 'ruleid' - expect(result1.detail).to be result2.detail - end + it "reuses rule match result detail instances" do + rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], variation: 1 } + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ key: 'userkey' }) + detail = EvaluationDetail.new(true, 1, EvaluationReason::rule_match(0, 'ruleid')) + result1 = basic_evaluator.evaluate(flag, context) + result2 = basic_evaluator.evaluate(flag, context) + expect(result1.detail.reason.rule_id).to eq 'ruleid' + expect(result1.detail).to be result2.detail end it "returns an error if rule variation is too high" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], variation: 999 } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: 'userkey' } + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ 
key: 'userkey' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end it "returns an error if rule variation is negative" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], variation: -1 } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: 'userkey' } + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end it "returns an error if rule has neither variation nor rollout" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }] } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: 'userkey' } + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end @@ -64,66 +62,53 @@ module Impl it "returns an error if rule has a rollout with no variations" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { variations: [] } } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: 'userkey' } + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end - it "coerces user key to a string for evaluation" do + it "coerces context key to a string for evaluation" do clause = { attribute: 'key', op: 'in', values: ['999'] } - flag = factory.boolean_flag_with_clauses([clause]) - user = { key: 999 } - result = basic_evaluator.evaluate(flag, user) + flag = Flags.boolean_flag_with_clauses(clause) + context = LDContext.create({ key: 999 }) + result = basic_evaluator.evaluate(flag, context) expect(result.detail.value).to eq(true) end - it "coerces secondary key to a string for evaluation" do - # We can't really verify that the rollout calculation works correctly, but we can at least - # make sure it doesn't error out if there's a non-string secondary value (ch35189) - rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], - rollout: { salt: '', variations: [ { weight: 100000, variation: 1 } ] } } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: "userkey", secondary: 999 } - result = basic_evaluator.evaluate(flag, user) - expect(result.detail.reason).to eq(EvaluationReason::rule_match(0, 'ruleid')) - end - describe "rule experiment/rollout behavior" do it "evaluates rollout for rule" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { variations: [ { weight: 100000, variation: 1, untracked: false } ] } } - 
flag = factory.boolean_flag_with_rules([rule]) - user = { key: 'userkey' } + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new(true, 1, EvaluationReason::rule_match(0, 'ruleid')) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end - if factory.with_preprocessing - it "reuses rule rollout result detail instance" do - rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], - rollout: { variations: [ { weight: 100000, variation: 1, untracked: false } ] } } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: 'userkey' } - detail = EvaluationDetail.new(true, 1, EvaluationReason::rule_match(0, 'ruleid')) - result1 = basic_evaluator.evaluate(flag, user) - result2 = basic_evaluator.evaluate(flag, user) - expect(result1.detail).to eq(detail) - expect(result2.detail).to be(result1.detail) - end + it "reuses rule rollout result detail instance" do + rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], + rollout: { variations: [ { weight: 100000, variation: 1, untracked: false } ] } } + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ key: 'userkey' }) + detail = EvaluationDetail.new(true, 1, EvaluationReason::rule_match(0, 'ruleid')) + result1 = basic_evaluator.evaluate(flag, context) + result2 = basic_evaluator.evaluate(flag, context) + expect(result1.detail).to eq(detail) + expect(result2.detail).to be(result1.detail) end it "sets the in_experiment value if rollout kind is experiment " do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { kind: 'experiment', variations: [ { weight: 100000, variation: 1, untracked: false } ] } } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: "userkey", secondary: 999 } - result = basic_evaluator.evaluate(flag, user) + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ key: "userkey" }) + result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(true) end @@ -131,9 +116,9 @@ module Impl it "does not set the in_experiment value if rollout kind is not experiment " do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { kind: 'rollout', variations: [ { weight: 100000, variation: 1, untracked: false } ] } } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: "userkey", secondary: 999 } - result = basic_evaluator.evaluate(flag, user) + flag = Flags.boolean_flag_with_rules(rule) + context = LDContext.create({ key: "userkey" }) + result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to_not include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(nil) end @@ -141,9 +126,9 @@ module Impl it "does not set the in_experiment value if rollout kind is experiment and untracked is true" do rule = { id: 'ruleid', clauses: [{ attribute: 'key', op: 'in', values: ['userkey'] }], rollout: { kind: 'experiment', variations: [ { weight: 100000, variation: 1, untracked: true } ] } } - flag = factory.boolean_flag_with_rules([rule]) - user = { key: "userkey", secondary: 999 } - result = basic_evaluator.evaluate(flag, user) + flag = Flags.boolean_flag_with_rules(rule) + context = 
LDContext.create({ key: "userkey" }) + result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to_not include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(nil) end diff --git a/spec/impl/evaluator_segment_spec.rb b/spec/impl/evaluator_segment_spec.rb index 70d86546..bc23c3d7 100644 --- a/spec/impl/evaluator_segment_spec.rb +++ b/spec/impl/evaluator_segment_spec.rb @@ -1,15 +1,16 @@ +require "model_builders" require "spec_helper" require "impl/evaluator_spec_base" module LaunchDarkly module Impl - evaluator_tests_with_and_without_preprocessing "Evaluator (segments)" do |desc, factory| - describe "#{desc} - evaluate", :evaluator_spec_base => true do - def test_segment_match(factory, segment) - clause = make_segment_match_clause(segment) - flag = factory.boolean_flag_with_clauses([clause]) + describe "Evaluator (segments)" do + describe "evaluate", :evaluator_spec_base => true do + def test_segment_match(segment, context) + clause = Clauses.match_segment(segment) + flag = Flags.boolean_flag_with_clauses(clause) e = EvaluatorBuilder.new(logger).with_segment(segment).build - e.evaluate(flag, user).detail.value + e.evaluate(flag, context).detail.value end it "retrieves segment from segment store for segmentMatch operator" do @@ -17,103 +18,179 @@ def test_segment_match(factory, segment) key: 'segkey', included: [ 'userkey' ], version: 1, - deleted: false + deleted: false, } e = EvaluatorBuilder.new(logger).with_segment(segment).build - flag = factory.boolean_flag_with_clauses([make_segment_match_clause(segment)]) - expect(e.evaluate(flag, user).detail.value).to be true + flag = Flags.boolean_flag_with_clauses(Clauses.match_segment(segment)) + expect(e.evaluate(flag, user_context).detail.value).to be true end it "falls through with no errors if referenced segment is not found" do e = EvaluatorBuilder.new(logger).with_unknown_segment('segkey').build clause = { attribute: '', op: 'segmentMatch', values: ['segkey'] } - flag = factory.boolean_flag_with_clauses([clause]) - expect(e.evaluate(flag, user).detail.value).to be false + flag = Flags.boolean_flag_with_clauses(clause) + expect(e.evaluate(flag, user_context).detail.value).to be false end - it 'explicitly includes user' do - segment = make_segment('segkey') - segment[:included] = [ user[:key] ] - expect(test_segment_match(factory, segment)).to be true + it 'explicitly includes context' do + segment = SegmentBuilder.new('segkey').included(user_context.key).build + expect(test_segment_match(segment, user_context)).to be true end - it 'explicitly excludes user' do - segment = make_segment('segkey') - segment[:excluded] = [ user[:key] ] - expect(test_segment_match(factory, segment)).to be false + it 'explicitly includes a specific context kind' do + org_context = LDContext::create({ key: "orgkey", kind: "org" }) + device_context = LDContext::create({ key: "devicekey", kind: "device" }) + multi_context = LDContext::create_multi([org_context, device_context]) + + segment = SegmentBuilder.new('segkey') + .included_contexts("org", "orgkey") + .build + + expect(test_segment_match(segment, org_context)).to be true + expect(test_segment_match(segment, device_context)).to be false + expect(test_segment_match(segment, multi_context)).to be true + end + + it 'explicitly excludes context' do + segment = SegmentBuilder.new('segkey').excluded(user_context.key).build + expect(test_segment_match(segment, user_context)).to be false end - it 'both includes and excludes user; include takes priority' do - 
segment = make_segment('segkey') - segment[:included] = [ user[:key] ] - segment[:excluded] = [ user[:key] ] - expect(test_segment_match(factory, segment)).to be true + it 'explicitly excludes a specific context kind' do + org_context = LDContext::create({ key: "orgkey", kind: "org" }) + device_context = LDContext::create({ key: "devicekey", kind: "device" }) + multi_context = LDContext::create_multi([org_context, device_context]) + + org_clause = Clauses.match_context(org_context, :key) + device_clause = Clauses.match_context(device_context, :key) + segment = SegmentBuilder.new('segkey') + .excluded_contexts("org", "orgkey") + .rule({ clauses: [ org_clause ]}) + .rule({ clauses: [ device_clause ]}) + .build + + expect(test_segment_match(segment, org_context)).to be false + expect(test_segment_match(segment, device_context)).to be true + expect(test_segment_match(segment, multi_context)).to be false end - it 'matches user by rule when weight is absent' do - segClause = make_user_matching_clause(user, :email) + it 'both includes and excludes context; include takes priority' do + segment = SegmentBuilder.new('segkey').included(user_context.key).excluded(user_context.key).build + expect(test_segment_match(segment, user_context)).to be true + end + + it 'matches context by rule when weight is absent' do + segClause = Clauses.match_context(user_context, :email) segRule = { - clauses: [ segClause ] + clauses: [ segClause ], } - segment = make_segment('segkey') - segment[:rules] = [ segRule ] - expect(test_segment_match(factory, segment)).to be true + segment = SegmentBuilder.new('segkey').rule(segRule).build + expect(test_segment_match(segment, user_context)).to be true end - it 'matches user by rule when weight is nil' do - segClause = make_user_matching_clause(user, :email) + it 'matches context by rule when weight is nil' do + segClause = Clauses.match_context(user_context, :email) segRule = { clauses: [ segClause ], - weight: nil + weight: nil, } - segment = make_segment('segkey') - segment[:rules] = [ segRule ] - expect(test_segment_match(factory, segment)).to be true + segment = SegmentBuilder.new('segkey').rule(segRule).build + expect(test_segment_match(segment, user_context)).to be true end - it 'matches user with full rollout' do - segClause = make_user_matching_clause(user, :email) + it 'matches context with full rollout' do + segClause = Clauses.match_context(user_context, :email) segRule = { clauses: [ segClause ], - weight: 100000 + weight: 100000, } - segment = make_segment('segkey') - segment[:rules] = [ segRule ] - expect(test_segment_match(factory, segment)).to be true + segment = SegmentBuilder.new('segkey').rule(segRule).build + expect(test_segment_match(segment, user_context)).to be true end - it "doesn't match user with zero rollout" do - segClause = make_user_matching_clause(user, :email) + it "doesn't match context with zero rollout" do + segClause = Clauses.match_context(user_context, :email) segRule = { clauses: [ segClause ], - weight: 0 + weight: 0, } - segment = make_segment('segkey') - segment[:rules] = [ segRule ] - expect(test_segment_match(factory, segment)).to be false + segment = SegmentBuilder.new('segkey').rule(segRule).build + expect(test_segment_match(segment, user_context)).to be false end - it "matches user with multiple clauses" do - segClause1 = make_user_matching_clause(user, :email) - segClause2 = make_user_matching_clause(user, :name) + it "matches context with multiple clauses" do + segClause1 = Clauses.match_context(user_context, :email) + 
segClause2 = Clauses.match_context(user_context, :name) segRule = { - clauses: [ segClause1, segClause2 ] + clauses: [ segClause1, segClause2 ], } - segment = make_segment('segkey') - segment[:rules] = [ segRule ] - expect(test_segment_match(factory, segment)).to be true + segment = SegmentBuilder.new('segkey').rule(segRule).build + expect(test_segment_match(segment, user_context)).to be true end - it "doesn't match user with multiple clauses if a clause doesn't match" do - segClause1 = make_user_matching_clause(user, :email) - segClause2 = make_user_matching_clause(user, :name) + it "doesn't match context with multiple clauses if a clause doesn't match" do + segClause1 = Clauses.match_context(user_context, :email) + segClause2 = Clauses.match_context(user_context, :name) segClause2[:values] = [ 'wrong' ] segRule = { - clauses: [ segClause1, segClause2 ] + clauses: [ segClause1, segClause2 ], } - segment = make_segment('segkey') - segment[:rules] = [ segRule ] - expect(test_segment_match(factory, segment)).to be false + segment = SegmentBuilder.new('segkey').rule(segRule).build + expect(test_segment_match(segment, user_context)).to be false + end + + (1..4).each do |depth| + it "can handle segments referencing other segments" do + context = LDContext.with_key("context") + segments = [] + (0...depth).each do |i| + builder = SegmentBuilder.new("segmentkey#{i}") + if i == depth - 1 + builder.included(context.key) + else + clause = Clauses.match_segment("segmentkey#{i + 1}") + builder.rule( + SegmentRuleBuilder.new.clause(clause) + ) + end + + segments << builder.build + end + + flag = Flags.boolean_flag_with_clauses(Clauses.match_segment("segmentkey0")) + + builder = EvaluatorBuilder.new(logger) + segments.each { |segment| builder.with_segment(segment) } + + evaluator = builder.build + result = evaluator.evaluate(flag, context) + expect(result.detail.value).to be(true) + + end + + it "will detect cycles in segments" do + context = LDContext.with_key("context") + segments = [] + (0...depth).each do |i| + clause = Clauses.match_segment("segmentkey#{(i + 1) % depth}") + builder = SegmentBuilder.new("segmentkey#{i}") + builder.rule( + SegmentRuleBuilder.new.clause(clause) + ) + + segments << builder.build + end + + flag = Flags.boolean_flag_with_clauses(Clauses.match_segment("segmentkey0")) + + builder = EvaluatorBuilder.new(logger) + segments.each { |segment| builder.with_segment(segment) } + + evaluator = builder.build + result = evaluator.evaluate(flag, context) + reason = EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG) + expect(result.detail.reason).to eq(reason) + end end end end diff --git a/spec/impl/evaluator_spec.rb b/spec/impl/evaluator_spec.rb index 7ac31728..47a933d3 100644 --- a/spec/impl/evaluator_spec.rb +++ b/spec/impl/evaluator_spec.rb @@ -5,240 +5,106 @@ module LaunchDarkly module Impl - evaluator_tests_with_and_without_preprocessing "Evaluator (general)" do |desc, factory| - describe "#{desc} - evaluate", :evaluator_spec_base => true do + describe "Evaluator (general)" do + describe "evaluate", :evaluator_spec_base => true do it "returns off variation if flag is off" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: false, offVariation: 1, fallthrough: { variation: 0 }, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'x' } + context = LDContext.create({ key: 'x' }) detail = EvaluationDetail.new('b', 1, EvaluationReason::off) - result = basic_evaluator.evaluate(flag, user) + result = 
basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end it "returns nil if flag is off and off variation is unspecified" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: false, fallthrough: { variation: 0 }, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'x' } + context = LDContext.create({ key: 'x' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::off) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end - if factory.with_preprocessing - it "reuses off result detail instance" do - flag = factory.flag({ - key: 'feature', - on: false, - offVariation: 1, - fallthrough: { variation: 0 }, - variations: ['a', 'b', 'c'] - }) - user = { key: 'x' } - detail = EvaluationDetail.new('b', 1, EvaluationReason::off) - result1 = basic_evaluator.evaluate(flag, user) - result2 = basic_evaluator.evaluate(flag, user) - expect(result1.detail).to eq(detail) - expect(result2.detail).to be(result1.detail) - end + it "reuses off result detail instance" do + flag = Flags.from_hash({ + key: 'feature', + on: false, + offVariation: 1, + fallthrough: { variation: 0 }, + variations: ['a', 'b', 'c'], + }) + context = LDContext.create({ key: 'x' }) + detail = EvaluationDetail.new('b', 1, EvaluationReason::off) + result1 = basic_evaluator.evaluate(flag, context) + result2 = basic_evaluator.evaluate(flag, context) + expect(result1.detail).to eq(detail) + expect(result2.detail).to be(result1.detail) end it "returns an error if off variation is too high" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: false, offVariation: 999, fallthrough: { variation: 0 }, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'x' } + context = LDContext.create({ key: 'x' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end it "returns an error if off variation is negative" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: false, offVariation: -1, fallthrough: { variation: 0 }, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'x' } + context = LDContext.create({ key: 'x' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = basic_evaluator.evaluate(flag, user) - expect(result.detail).to eq(detail) - expect(result.prereq_evals).to eq(nil) - end - - it "returns off variation if prerequisite is not found" do - flag = factory.flag({ - key: 'feature0', - on: true, - prerequisites: [{key: 'badfeature', variation: 1}], - fallthrough: { variation: 0 }, - offVariation: 1, - variations: ['a', 'b', 'c'] - }) - user = { key: 'x' } - detail = EvaluationDetail.new('b', 1, EvaluationReason::prerequisite_failed('badfeature')) - e = EvaluatorBuilder.new(logger).with_unknown_flag('badfeature').build - result = e.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end - if factory.with_preprocessing - it "reuses prerequisite-failed result detail instances" do - flag = factory.flag({ - key: 'feature0', - on: true, - 
prerequisites: [{key: 'badfeature', variation: 1}], - fallthrough: { variation: 0 }, - offVariation: 1, - variations: ['a', 'b', 'c'] - }) - user = { key: 'x' } - e = EvaluatorBuilder.new(logger).with_unknown_flag('badfeature').build - result1 = e.evaluate(flag, user) - expect(result1.detail.reason).to eq EvaluationReason::prerequisite_failed('badfeature') - result2 = e.evaluate(flag, user) - expect(result2.detail).to be result1.detail - end - end - - it "returns off variation and event if prerequisite of a prerequisite is not found" do - flag = factory.flag({ - key: 'feature0', - on: true, - prerequisites: [{key: 'feature1', variation: 1}], - fallthrough: { variation: 0 }, - offVariation: 1, - variations: ['a', 'b', 'c'], - version: 1 - }) - flag1 = factory.flag({ - key: 'feature1', - on: true, - prerequisites: [{key: 'feature2', variation: 1}], # feature2 doesn't exist - fallthrough: { variation: 0 }, - variations: ['d', 'e'], - version: 2 - }) - user = { key: 'x' } - detail = EvaluationDetail.new('b', 1, EvaluationReason::prerequisite_failed('feature1')) - expected_prereqs = [ - PrerequisiteEvalRecord.new(flag1, flag, EvaluationDetail.new(nil, nil, EvaluationReason::prerequisite_failed('feature2'))) - ] - e = EvaluatorBuilder.new(logger).with_flag(flag1).with_unknown_flag('feature2').build - result = e.evaluate(flag, user) - expect(result.detail).to eq(detail) - expect(result.prereq_evals).to eq(expected_prereqs) - end - - it "returns off variation and event if prerequisite is off" do - flag = factory.flag({ - key: 'feature0', - on: true, - prerequisites: [{key: 'feature1', variation: 1}], - fallthrough: { variation: 0 }, - offVariation: 1, - variations: ['a', 'b', 'c'], - version: 1 - }) - flag1 = factory.flag({ - key: 'feature1', - on: false, - # note that even though it returns the desired variation, it is still off and therefore not a match - offVariation: 1, - fallthrough: { variation: 0 }, - variations: ['d', 'e'], - version: 2 - }) - user = { key: 'x' } - detail = EvaluationDetail.new('b', 1, EvaluationReason::prerequisite_failed('feature1')) - expected_prereqs = [ - PrerequisiteEvalRecord.new(flag1, flag, EvaluationDetail.new('e', 1, EvaluationReason::off)) - ] - e = EvaluatorBuilder.new(logger).with_flag(flag1).build - result = e.evaluate(flag, user) - expect(result.detail).to eq(detail) - expect(result.prereq_evals).to eq(expected_prereqs) - end - - it "returns off variation and event if prerequisite is not met" do - flag = factory.flag({ - key: 'feature0', - on: true, - prerequisites: [{key: 'feature1', variation: 1}], - fallthrough: { variation: 0 }, - offVariation: 1, - variations: ['a', 'b', 'c'], - version: 1 - }) - flag1 = factory.flag({ - key: 'feature1', - on: true, - fallthrough: { variation: 0 }, - variations: ['d', 'e'], - version: 2 - }) - user = { key: 'x' } - detail = EvaluationDetail.new('b', 1, EvaluationReason::prerequisite_failed('feature1')) - expected_prereqs = [ - PrerequisiteEvalRecord.new(flag1, flag, EvaluationDetail.new('d', 0, EvaluationReason::fallthrough)) - ] - e = EvaluatorBuilder.new(logger).with_flag(flag1).build - result = e.evaluate(flag, user) - expect(result.detail).to eq(detail) - expect(result.prereq_evals).to eq(expected_prereqs) - end - - it "returns fallthrough variation and event if prerequisite is met and there are no rules" do - flag = factory.flag({ + it "returns fallthrough variation if flag is on and no rules match" do + flag = Flags.from_hash({ key: 'feature0', on: true, - prerequisites: [{key: 'feature1', variation: 1}], 
fallthrough: { variation: 0 }, offVariation: 1, variations: ['a', 'b', 'c'], - version: 1 - }) - flag1 = factory.flag({ - key: 'feature1', - on: true, - fallthrough: { variation: 1 }, - variations: ['d', 'e'], - version: 2 + version: 1, + rules: [ + { variation: 2, clauses: [ { attribute: "key", op: "in", values: ["zzz"] } ] }, + ], }) - user = { key: 'x' } + context = LDContext.create({ key: 'x' }) detail = EvaluationDetail.new('a', 0, EvaluationReason::fallthrough) - expected_prereqs = [ - PrerequisiteEvalRecord.new(flag1, flag, EvaluationDetail.new('e', 1, EvaluationReason::fallthrough)) - ] - e = EvaluatorBuilder.new(logger).with_flag(flag1).build - result = e.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) - expect(result.prereq_evals).to eq(expected_prereqs) + expect(result.prereq_evals).to eq(nil) end - it "returns fallthrough variation if flag is on and no rules match" do - flag = factory.flag({ + it "reuses fallthrough variation result detail instance" do + flag = Flags.from_hash({ key: 'feature0', on: true, fallthrough: { variation: 0 }, @@ -246,211 +112,186 @@ module Impl variations: ['a', 'b', 'c'], version: 1, rules: [ - { variation: 2, clauses: [ { attribute: "key", op: "in", values: ["zzz"] } ] } - ] + { variation: 2, clauses: [ { attribute: "key", op: "in", values: ["zzz"] } ] }, + ], }) - user = { key: 'x' } + context = LDContext.create({ key: 'x' }) detail = EvaluationDetail.new('a', 0, EvaluationReason::fallthrough) - result = basic_evaluator.evaluate(flag, user) - expect(result.detail).to eq(detail) - expect(result.prereq_evals).to eq(nil) - end - - if factory.with_preprocessing - it "reuses fallthrough variation result detail instance" do - flag = factory.flag({ - key: 'feature0', - on: true, - fallthrough: { variation: 0 }, - offVariation: 1, - variations: ['a', 'b', 'c'], - version: 1, - rules: [ - { variation: 2, clauses: [ { attribute: "key", op: "in", values: ["zzz"] } ] } - ] - }) - user = { key: 'x' } - detail = EvaluationDetail.new('a', 0, EvaluationReason::fallthrough) - result1 = basic_evaluator.evaluate(flag, user) - result2 = basic_evaluator.evaluate(flag, user) - expect(result1.detail).to eq(detail) - expect(result2.detail).to be(result1.detail) - end + result1 = basic_evaluator.evaluate(flag, context) + result2 = basic_evaluator.evaluate(flag, context) + expect(result1.detail).to eq(detail) + expect(result2.detail).to be(result1.detail) end it "returns an error if fallthrough variation is too high" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: true, fallthrough: { variation: 999 }, offVariation: 1, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'userkey' } + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end it "returns an error if fallthrough variation is negative" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: true, fallthrough: { variation: -1 }, offVariation: 1, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'userkey' } + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = 
basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end it "returns an error if fallthrough has no variation or rollout" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: true, fallthrough: { }, offVariation: 1, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'userkey' } + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end it "returns an error if fallthrough has a rollout with no variations" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: true, fallthrough: { rollout: { variations: [] } }, offVariation: 1, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'userkey' } + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new(nil, nil, EvaluationReason::error(EvaluationReason::ERROR_MALFORMED_FLAG)) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end - it "matches user from targets" do - flag = factory.flag({ + it "matches context from targets" do + flag = Flags.from_hash({ key: 'feature', on: true, targets: [ - { values: [ 'whoever', 'userkey' ], variation: 2 } + { values: [ 'whoever', 'userkey' ], variation: 2 }, ], fallthrough: { variation: 0 }, offVariation: 1, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'userkey' } + context = LDContext.create({ key: 'userkey' }) detail = EvaluationDetail.new('c', 2, EvaluationReason::target_match) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end - if factory.with_preprocessing - it "reuses target-match result detail instances" do - flag = factory.flag({ - key: 'feature', - on: true, - targets: [ - { values: [ 'whoever', 'userkey' ], variation: 2 } - ], - fallthrough: { variation: 0 }, - offVariation: 1, - variations: ['a', 'b', 'c'] - }) - user = { key: 'userkey' } - detail = EvaluationDetail.new('c', 2, EvaluationReason::target_match) - result1 = basic_evaluator.evaluate(flag, user) - result2 = basic_evaluator.evaluate(flag, user) - expect(result1.detail).to eq(detail) - expect(result2.detail).to be(result1.detail) - end + it "reuses target-match result detail instances" do + flag = Flags.from_hash({ + key: 'feature', + on: true, + targets: [ + { values: [ 'whoever', 'userkey' ], variation: 2 }, + ], + fallthrough: { variation: 0 }, + offVariation: 1, + variations: ['a', 'b', 'c'], + }) + context = LDContext.create({ key: 'userkey' }) + detail = EvaluationDetail.new('c', 2, EvaluationReason::target_match) + result1 = basic_evaluator.evaluate(flag, context) + result2 = basic_evaluator.evaluate(flag, context) + expect(result1.detail).to eq(detail) + expect(result2.detail).to be(result1.detail) end describe "fallthrough experiment/rollout behavior" do it "evaluates rollout for fallthrough" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature0', on: true, - fallthrough: { rollout: { variations: [ { weight: 100000, variation: 1, untracked: 
false } ] } }, + fallthrough: { rollout: { variations: [ { weight: 100000, variation: 1, untracked: false } ] } }, offVariation: 1, variations: ['a', 'b', 'c'], - version: 1 + version: 1, }) - user = { key: 'x' } + context = LDContext.create({ key: 'x' }) detail = EvaluationDetail.new('b', 1, EvaluationReason::fallthrough) - result = basic_evaluator.evaluate(flag, user) + result = basic_evaluator.evaluate(flag, context) expect(result.detail).to eq(detail) expect(result.prereq_evals).to eq(nil) end - if factory.with_preprocessing - it "reuses fallthrough rollout result detail instance" do - flag = factory.flag({ - key: 'feature0', - on: true, - fallthrough: { rollout: { variations: [ { weight: 100000, variation: 1, untracked: false } ] } }, - offVariation: 1, - variations: ['a', 'b', 'c'], - version: 1 - }) - user = { key: 'x' } - detail = EvaluationDetail.new('b', 1, EvaluationReason::fallthrough) - result1 = basic_evaluator.evaluate(flag, user) - result2 = basic_evaluator.evaluate(flag, user) - expect(result1.detail).to eq(detail) - expect(result2.detail).to be(result1.detail) - end + it "reuses fallthrough rollout result detail instance" do + flag = Flags.from_hash({ + key: 'feature0', + on: true, + fallthrough: { rollout: { variations: [ { weight: 100000, variation: 1, untracked: false } ] } }, + offVariation: 1, + variations: ['a', 'b', 'c'], + version: 1, + }) + context = LDContext.create({ key: 'x' }) + detail = EvaluationDetail.new('b', 1, EvaluationReason::fallthrough) + result1 = basic_evaluator.evaluate(flag, context) + result2 = basic_evaluator.evaluate(flag, context) + expect(result1.detail).to eq(detail) + expect(result2.detail).to be(result1.detail) end it "sets the in_experiment value if rollout kind is experiment and untracked false" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: true, - fallthrough: { rollout: { kind: 'experiment', variations: [ { weight: 100000, variation: 1, untracked: false } ] } }, + fallthrough: { rollout: { kind: 'experiment', variations: [ { weight: 100000, variation: 1, untracked: false } ] } }, offVariation: 1, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'userkey' } - result = basic_evaluator.evaluate(flag, user) + context = LDContext.create({ key: 'userkey' }) + result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(true) end it "does not set the in_experiment value if rollout kind is not experiment" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: true, - fallthrough: { rollout: { kind: 'rollout', variations: [ { weight: 100000, variation: 1, untracked: false } ] } }, + fallthrough: { rollout: { kind: 'rollout', variations: [ { weight: 100000, variation: 1, untracked: false } ] } }, offVariation: 1, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'userkey' } - result = basic_evaluator.evaluate(flag, user) + context = LDContext.create({ key: 'userkey' }) + result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to_not include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(nil) end it "does not set the in_experiment value if rollout kind is experiment and untracked is true" do - flag = factory.flag({ + flag = Flags.from_hash({ key: 'feature', on: true, - fallthrough: { rollout: { kind: 'experiment', variations: [ { weight: 100000, variation: 1, untracked: true } ] 
} }, + fallthrough: { rollout: { kind: 'experiment', variations: [ { weight: 100000, variation: 1, untracked: true } ] } }, offVariation: 1, - variations: ['a', 'b', 'c'] + variations: ['a', 'b', 'c'], }) - user = { key: 'userkey' } - result = basic_evaluator.evaluate(flag, user) + context = LDContext.create({ key: 'userkey' }) + result = basic_evaluator.evaluate(flag, context) expect(result.detail.reason.to_json).to_not include('"inExperiment":true') expect(result.detail.reason.in_experiment).to eq(nil) end diff --git a/spec/impl/evaluator_spec_base.rb b/spec/impl/evaluator_spec_base.rb index fc1f0414..0ae1747a 100644 --- a/spec/impl/evaluator_spec_base.rb +++ b/spec/impl/evaluator_spec_base.rb @@ -1,22 +1,9 @@ require "ldclient-rb/impl/big_segments" +require "ldclient-rb/impl/model/serialization" require "model_builders" require "spec_helper" -def evaluator_tests_with_and_without_preprocessing(desc_base) - # In the evaluator tests, we are really testing two sets of evaluation logic: one where preprocessed - # results are not available, and one where they are. In normal usage, flags always get preprocessed and - # we expect evaluations to almost always be able to reuse a preprocessed result-- but we still want to - # verify that the evaluator works even if preprocessing hasn't happened, since a flag is just a Hash and - # so we can't do any type-level enforcement to constrain its state. The DataItemFactory abstraction - # controls whether flags/segments created in these tests do or do not have preprocessing applied. - [true, false].each do |with_preprocessing| - pre_desc = with_preprocessing ? "with preprocessing" : "without preprocessing" - desc = "#{desc_base} - #{pre_desc}" - yield desc, DataItemFactory.new(with_preprocessing) - end -end - module LaunchDarkly module Impl class EvaluatorBuilder @@ -30,7 +17,7 @@ def initialize(logger) end def with_flag(flag) - @flags[flag[:key]] = flag + @flags[flag[:key]] = Model.deserialize(FEATURES, flag) self end @@ -40,7 +27,7 @@ def with_unknown_flag(key) end def with_segment(segment) - @segments[segment[:key]] = segment + @segments[segment[:key]] = Model.deserialize(SEGMENTS, segment) self end @@ -49,10 +36,10 @@ def with_unknown_segment(key) self end - def with_big_segment_for_user(user, segment, included) - user_key = user[:key] - @big_segment_memberships[user_key] = {} if !@big_segment_memberships.has_key?(user_key) - @big_segment_memberships[user_key][Evaluator.make_big_segment_ref(segment)] = included + def with_big_segment_for_context(context, segment, included) + context_key = context.key + @big_segment_memberships[context_key] = {} unless @big_segment_memberships.has_key?(context_key) + @big_segment_memberships[context_key][Evaluator.make_big_segment_ref(segment)] = included self end @@ -73,29 +60,29 @@ def build end private def get_flag(key) - raise "should not have requested flag #{key}" if !@flags.has_key?(key) + raise "should not have requested flag #{key}" unless @flags.has_key?(key) @flags[key] end private def get_segment(key) - raise "should not have requested segment #{key}" if !@segments.has_key?(key) + raise "should not have requested segment #{key}" unless @segments.has_key?(key) @segments[key] end private def get_big_segments(user_key) - raise "should not have requested big segments for #{user_key}" if !@big_segment_memberships.has_key?(user_key) + raise "should not have requested big segments for #{user_key}" unless @big_segment_memberships.has_key?(user_key) @big_segments_queries << user_key 
BigSegmentMembershipResult.new(@big_segment_memberships[user_key], @big_segments_status) end end module EvaluatorSpecBase - def user - { + def user_context + LDContext::create({ key: "userkey", email: "test@example.com", - name: "Bob" - } + name: "Bob", + }) end def logger @@ -105,33 +92,6 @@ def logger def basic_evaluator EvaluatorBuilder.new(logger).build end - - def make_user_matching_clause(user, attr = :key) - { - attribute: attr.to_s, - op: :in, - values: [ user[attr.to_sym] ], - negate: false - } - end - - def make_segment(key) - { - key: key, - included: [], - excluded: [], - salt: 'abcdef', - version: 1 - } - end - - def make_segment_match_clause(segment) - { - op: :segmentMatch, - values: [ segment[:key] ], - negate: false - } - end end RSpec.configure { |c| c.include EvaluatorSpecBase, :evaluator_spec_base => true } diff --git a/spec/impl/event_summarizer_spec.rb b/spec/impl/event_summarizer_spec.rb index bbd3f2ba..d3eb953a 100644 --- a/spec/impl/event_summarizer_spec.rb +++ b/spec/impl/event_summarizer_spec.rb @@ -2,18 +2,19 @@ require "events_test_util" require "spec_helper" +require "set" module LaunchDarkly module Impl describe EventSummarizer do subject { EventSummarizer } - let(:user) { { key: "key" } } + let(:context) { LaunchDarkly::LDContext.create({ key: "key" }) } it "does not add identify event to summary" do es = subject.new snapshot = es.snapshot - es.summarize_event({ kind: "identify", user: user }) + es.summarize_event({ kind: "identify", context: context }) expect(es.snapshot).to eq snapshot end @@ -21,7 +22,7 @@ module Impl it "does not add custom event to summary" do es = subject.new snapshot = es.snapshot - es.summarize_event({ kind: "custom", key: "whatever", user: user }) + es.summarize_event({ kind: "custom", key: "whatever", context: context }) expect(es.snapshot).to eq snapshot end @@ -29,9 +30,9 @@ module Impl it "tracks start and end dates" do es = subject.new flag = { key: "key" } - event1 = make_eval_event(2000, user, 'flag1') - event2 = make_eval_event(1000, user, 'flag1') - event3 = make_eval_event(1500, user, 'flag1') + event1 = make_eval_event(2000, context, 'flag1') + event2 = make_eval_event(1000, context, 'flag1') + event3 = make_eval_event(1500, context, 'flag1') es.summarize_event(event1) es.summarize_event(event2) es.summarize_event(event3) @@ -45,11 +46,11 @@ module Impl es = subject.new flag1 = { key: "key1", version: 11 } flag2 = { key: "key2", version: 22 } - event1 = make_eval_event(0, user, 'key1', 11, 1, 'value1', nil, 'default1') - event2 = make_eval_event(0, user, 'key1', 11, 2, 'value2', nil, 'default1') - event3 = make_eval_event(0, user, 'key2', 22, 1, 'value99', nil, 'default2') - event4 = make_eval_event(0, user, 'key1', 11, 1, 'value99', nil, 'default1') - event5 = make_eval_event(0, user, 'badkey', nil, nil, 'default3', nil, 'default3') + event1 = make_eval_event(0, context, 'key1', 11, 1, 'value1', nil, 'default1') + event2 = make_eval_event(0, context, 'key1', 11, 2, 'value2', nil, 'default1') + event3 = make_eval_event(0, context, 'key2', 22, 1, 'value99', nil, 'default2') + event4 = make_eval_event(0, context, 'key1', 11, 1, 'value99', nil, 'default1') + event5 = make_eval_event(0, context, 'badkey', nil, nil, 'default3', nil, 'default3') [event1, event2, event3, event4, event5].each { |e| es.summarize_event(e) } data = es.snapshot @@ -58,24 +59,27 @@ module Impl 'default1', { 11 => { 1 => EventSummaryFlagVariationCounter.new('value1', 2), - 2 => EventSummaryFlagVariationCounter.new('value2', 1) - } - } + 2 => 
EventSummaryFlagVariationCounter.new('value2', 1), + }, + }, + Set.new(["user"]) ), 'key2' => EventSummaryFlagInfo.new( 'default2', { 22 => { - 1 => EventSummaryFlagVariationCounter.new('value99', 1) - } - } + 1 => EventSummaryFlagVariationCounter.new('value99', 1), + }, + }, + Set.new(["user"]) ), 'badkey' => EventSummaryFlagInfo.new( 'default3', { nil => { - nil => EventSummaryFlagVariationCounter.new('default3', 1) - } - } - ) + nil => EventSummaryFlagVariationCounter.new('default3', 1), + }, + }, + Set.new(["user"]) + ), } expect(data.counters).to eq expectedCounters end diff --git a/spec/impl/model/preprocessed_data_spec.rb b/spec/impl/model/preprocessed_data_spec.rb index c805a3d2..753590d5 100644 --- a/spec/impl/model/preprocessed_data_spec.rb +++ b/spec/impl/model/preprocessed_data_spec.rb @@ -1,11 +1,7 @@ +require "ldclient-rb/impl/model/feature_flag" require "model_builders" require "spec_helper" -def strip_preprocessed_nulls(json) - # currently we can't avoid emitting these null properties - we just don't want to see anything other than null there - json.gsub('"_preprocessed":null,', '').gsub(',"_preprocessed":null', '') -end - module LaunchDarkly module Impl module DataModelPreprocessing @@ -19,24 +15,23 @@ module DataModelPreprocessing variations: [true, false], fallthroughVariation: 1, prerequisites: [ - { key: 'a', variation: 0 } + { key: 'a', variation: 0 }, ], targets: [ - { variation: 0, values: ['a'] } + { variation: 0, values: ['a'] }, ], rules: [ { variation: 0, clauses: [ - { attribute: 'key', op: 'in', values: ['a'] } - ] - } - ] + { attribute: 'key', op: 'in', values: ['a'] }, + ], + }, + ], } - flag = clone_json_object(original_flag) - Preprocessor.new().preprocess_flag!(flag) + flag = Model::FeatureFlag.new(original_flag) json = Model.serialize(FEATURES, flag) - parsed = JSON.parse(strip_preprocessed_nulls(json), symbolize_names: true) + parsed = JSON.parse(json, symbolize_names: true) expect(parsed).to eq(original_flag) end end diff --git a/spec/impl/model/serialization_spec.rb b/spec/impl/model/serialization_spec.rb index 0d6fa4de..f2d364eb 100644 --- a/spec/impl/model/serialization_spec.rb +++ b/spec/impl/model/serialization_spec.rb @@ -5,40 +5,30 @@ module LaunchDarkly module Impl module Model describe "model serialization" do - factory = DataItemFactory.new(true) # true = enable the usual preprocessing logic - it "serializes flag" do - flag = { key: "flagkey", version: 1 } + flag = FlagBuilder.new("flagkey").version(1).build json = Model.serialize(FEATURES, flag) - expect(JSON.parse(json, symbolize_names: true)).to eq flag + expect(JSON.parse(json, symbolize_names: true)).to eq flag.data end it "serializes segment" do - segment = { key: "segkey", version: 1 } + segment = SegmentBuilder.new("segkey").version(1).build json = Model.serialize(SEGMENTS, segment) - expect(JSON.parse(json, symbolize_names: true)).to eq segment - end - - it "serializes arbitrary data kind" do - thing = { key: "thingkey", name: "me" } - json = Model.serialize({ name: "things" }, thing) - expect(JSON.parse(json, symbolize_names: true)).to eq thing + expect(JSON.parse(json, symbolize_names: true)).to eq segment.data end it "deserializes flag with no rules or prerequisites" do flag_in = { key: "flagkey", version: 1 } - flag_preprocessed = factory.flag(flag_in) - json = Model.serialize(FEATURES, flag_preprocessed) - flag_out = Model.deserialize(FEATURES, json) - expect(flag_out).to eq flag_preprocessed + json = flag_in.to_json + flag_out = Model.deserialize(FEATURES, json, nil) + 
expect(flag_out.data).to eq flag_in end it "deserializes segment" do segment_in = { key: "segkey", version: 1 } - segment_preprocessed = factory.segment(segment_in) - json = Model.serialize(SEGMENTS, segment_preprocessed) - segment_out = Model.deserialize(SEGMENTS, json) - expect(segment_out).to eq factory.segment(segment_preprocessed) + json = segment_in.to_json + segment_out = Model.deserialize(SEGMENTS, json, nil) + expect(segment_out.data).to eq segment_in end end end diff --git a/spec/impl/repeating_task_spec.rb b/spec/impl/repeating_task_spec.rb index ba780d78..89f4a408 100644 --- a/spec/impl/repeating_task_spec.rb +++ b/spec/impl/repeating_task_spec.rb @@ -10,7 +10,7 @@ module Impl def null_logger double().as_null_object end - + it "does not start when created" do signal = Concurrent::Event.new task = RepeatingTask.new(0.01, 0, -> { signal.set }, null_logger) @@ -29,7 +29,7 @@ def null_logger task.start 3.times do time = queue.pop - if !last.nil? + unless last.nil? expect(time.to_f - last.to_f).to be >=(0.05) end last = time diff --git a/spec/integrations/consul_feature_store_spec.rb b/spec/integrations/consul_feature_store_spec.rb index e73858fa..356f1679 100644 --- a/spec/integrations/consul_feature_store_spec.rb +++ b/spec/integrations/consul_feature_store_spec.rb @@ -7,7 +7,7 @@ $consul_base_opts = { prefix: $my_prefix, - logger: $null_log + logger: $null_log, } class ConsulStoreTester @@ -28,7 +28,7 @@ def create_feature_store describe "Consul feature store" do break if ENV['LD_SKIP_DATABASE_TESTS'] == '1' - + include_examples "persistent_feature_store", ConsulStoreTester end diff --git a/spec/integrations/dynamodb_stores_spec.rb b/spec/integrations/dynamodb_stores_spec.rb index 8f7c5c07..f12fa63a 100644 --- a/spec/integrations/dynamodb_stores_spec.rb +++ b/spec/integrations/dynamodb_stores_spec.rb @@ -13,12 +13,12 @@ class DynamoDBStoreTester DYNAMODB_OPTS = { credentials: Aws::Credentials.new("key", "secret"), region: "us-east-1", - endpoint: "http://localhost:8000" + endpoint: "http://localhost:8000", } FEATURE_STORE_BASE_OPTS = { dynamodb_opts: DYNAMODB_OPTS, prefix: 'testprefix', - logger: $null_log + logger: $null_log, } def initialize(options = {}) @@ -44,16 +44,16 @@ def self.create_table_if_necessary table_name: TABLE_NAME, key_schema: [ { attribute_name: "namespace", key_type: "HASH" }, - { attribute_name: "key", key_type: "RANGE" } + { attribute_name: "key", key_type: "RANGE" }, ], attribute_definitions: [ { attribute_name: "namespace", attribute_type: "S" }, - { attribute_name: "key", attribute_type: "S" } + { attribute_name: "key", attribute_type: "S" }, ], provisioned_throughput: { read_capacity_units: 1, - write_capacity_units: 1 - } + write_capacity_units: 1, + }, } client.create_table(req) @@ -68,8 +68,8 @@ def clear_data projection_expression: '#namespace, #key', expression_attribute_names: { '#namespace' => 'namespace', - '#key' => 'key' - } + '#key' => 'key', + }, } while true resp = client.scan(req) @@ -94,7 +94,7 @@ def create_feature_store def create_big_segment_store LaunchDarkly::Integrations::DynamoDB::new_big_segment_store(TABLE_NAME, @options) end - + def set_big_segments_metadata(metadata) client = self.class.create_test_client key = @actual_prefix + $DynamoDBBigSegmentStore::KEY_METADATA @@ -103,28 +103,28 @@ def set_big_segments_metadata(metadata) item: { "namespace" => key, "key" => key, - $DynamoDBBigSegmentStore::ATTR_SYNC_TIME => metadata.last_up_to_date + $DynamoDBBigSegmentStore::ATTR_SYNC_TIME => metadata.last_up_to_date, } ) end - def 
set_big_segments(user_hash, includes, excludes) + def set_big_segments(context_hash, includes, excludes) client = self.class.create_test_client sets = { $DynamoDBBigSegmentStore::ATTR_INCLUDED => Set.new(includes), - $DynamoDBBigSegmentStore::ATTR_EXCLUDED => Set.new(excludes) + $DynamoDBBigSegmentStore::ATTR_EXCLUDED => Set.new(excludes), } sets.each do |attr_name, values| - if !values.empty? + unless values.empty? client.update_item( table_name: TABLE_NAME, key: { - "namespace" => @actual_prefix + $DynamoDBBigSegmentStore::KEY_USER_DATA, - "key" => user_hash + "namespace" => @actual_prefix + $DynamoDBBigSegmentStore::KEY_CONTEXT_DATA, + "key" => context_hash, }, update_expression: "ADD #{attr_name} :value", expression_attribute_values: { - ":value" => values + ":value" => values, } ) end diff --git a/spec/integrations/redis_stores_spec.rb b/spec/integrations/redis_stores_spec.rb index 4f26cbb0..d5c503bc 100644 --- a/spec/integrations/redis_stores_spec.rb +++ b/spec/integrations/redis_stores_spec.rb @@ -38,7 +38,7 @@ def create_feature_store def create_big_segment_store LaunchDarkly::Integrations::Redis.new_big_segment_store(@options) end - + def set_big_segments_metadata(metadata) with_redis_test_client do |client| client.set(@actual_prefix + $RedisBigSegmentStore::KEY_LAST_UP_TO_DATE, @@ -46,13 +46,13 @@ def set_big_segments_metadata(metadata) end end - def set_big_segments(user_hash, includes, excludes) + def set_big_segments(context_hash, includes, excludes) with_redis_test_client do |client| includes.each do |ref| - client.sadd(@actual_prefix + $RedisBigSegmentStore::KEY_USER_INCLUDE + user_hash, ref) + client.sadd?(@actual_prefix + $RedisBigSegmentStore::KEY_CONTEXT_INCLUDE + context_hash, ref) end excludes.each do |ref| - client.sadd(@actual_prefix + $RedisBigSegmentStore::KEY_USER_EXCLUDE + user_hash, ref) + client.sadd?(@actual_prefix + $RedisBigSegmentStore::KEY_CONTEXT_EXCLUDE + context_hash, ref) end end end @@ -85,7 +85,7 @@ def make_concurrent_modifier_test_hook(other_client, flag, start_version, end_ve flag = { key: "foo", version: 1 } test_hook = make_concurrent_modifier_test_hook(other_client, flag, 2, 4) tester = RedisStoreTester.new({ test_hook: test_hook, logger: $null_logger }) - + ensure_stop(tester.create_feature_store) do |store| store.init(LaunchDarkly::FEATURES => { flag[:key] => flag }) diff --git a/spec/integrations/store_wrapper_spec.rb b/spec/integrations/store_wrapper_spec.rb index e7890802..58def5a8 100644 --- a/spec/integrations/store_wrapper_spec.rb +++ b/spec/integrations/store_wrapper_spec.rb @@ -238,7 +238,7 @@ def initialize attr_accessor :inited def force_set(kind, item) - @data[kind] = {} if !@data.has_key?(kind) + @data[kind] = {} unless @data.has_key?(kind) @data[kind][item[:key]] = item end @@ -261,7 +261,7 @@ def get_all_internal(kind) end def upsert_internal(kind, item) - @data[kind] = {} if !@data.has_key?(kind) + @data[kind] = {} unless @data.has_key?(kind) old_item = @data[kind][item[:key]] return old_item if !old_item.nil? 
&& old_item[:version] >= item[:version] @data[kind][item[:key]] = item diff --git a/spec/integrations/test_data_spec.rb b/spec/integrations/test_data_spec.rb index 75418bd3..f576175d 100644 --- a/spec/integrations/test_data_spec.rb +++ b/spec/integrations/test_data_spec.rb @@ -16,13 +16,13 @@ module Integrations td.update(td.flag('flag')) config = Config.new(send_events: false, data_source: td) client = LDClient.new('sdkKey', config) - expect(config.feature_store.get(FEATURES, 'flag')).to eql({ + expect(config.feature_store.get(FEATURES, 'flag').data).to eql({ key: 'flag', variations: [true, false], fallthrough: { variation: 0 }, offVariation: 1, on: true, - version: 1 + version: 1, }) client.close end @@ -35,40 +35,40 @@ module Integrations config2 = Config.new(send_events: false, data_source: td) client2 = LDClient.new('sdkKey', config2) - expect(config.feature_store.get(FEATURES, 'flag')).to eql({ + expect(config.feature_store.get(FEATURES, 'flag').data).to eql({ key: 'flag', variations: [true, false], fallthrough: { variation: 0 }, offVariation: 1, on: true, - version: 1 + version: 1, }) - expect(config2.feature_store.get(FEATURES, 'flag')).to eql({ + expect(config2.feature_store.get(FEATURES, 'flag').data).to eql({ key: 'flag', variations: [true, false], fallthrough: { variation: 0 }, offVariation: 1, on: true, - version: 1 + version: 1, }) - td.update(td.flag('flag').variation_for_all_users(false)) + td.update(td.flag('flag').variation_for_all(false)) - expect(config.feature_store.get(FEATURES, 'flag')).to eql({ + expect(config.feature_store.get(FEATURES, 'flag').data).to eql({ key: 'flag', variations: [true, false], fallthrough: { variation: 1 }, offVariation: 1, on: true, - version: 2 + version: 2, }) - expect(config2.feature_store.get(FEATURES, 'flag')).to eql({ + expect(config2.feature_store.get(FEATURES, 'flag').data).to eql({ key: 'flag', variations: [true, false], fallthrough: { variation: 1 }, offVariation: 1, on: true, - version: 2 + version: 2, }) client.close @@ -83,22 +83,22 @@ module Integrations config = Config.new(send_events: false, data_source: td) client = LDClient.new('sdkKey', config) - expect(config.feature_store.get(FEATURES, 'my-flag')).to eql({ + expect(config.feature_store.get(FEATURES, 'my-flag').data).to eql({ key: 'my-flag', version: 1000, on: true }) - expect(config.feature_store.get(SEGMENTS, 'my-segment')).to eql({ + expect(config.feature_store.get(SEGMENTS, 'my-segment').data).to eql({ key: 'my-segment', version: 2000 }) td.use_preconfigured_flag({ key: 'my-flag', on: false }) - expect(config.feature_store.get(FEATURES, 'my-flag')).to eql({ + expect(config.feature_store.get(FEATURES, 'my-flag').data).to eql({ key: 'my-flag', version: 1001, on: false }) td.use_preconfigured_segment({ key: 'my-segment', included: [ 'x' ] }) - expect(config.feature_store.get(SEGMENTS, 'my-segment')).to eql({ + expect(config.feature_store.get(SEGMENTS, 'my-segment').data).to eql({ key: 'my-segment', version: 2001, included: [ 'x' ] }) @@ -115,11 +115,11 @@ module Integrations it 'TestData.flag returns a copy of the existing flag if it exists' do td = TestData.new - td.update(td.flag('flag').variation_for_all_users(true)) + td.update(td.flag('flag').variation_for_all(true)) expect(td.flag('flag').build(0)[:fallthrough][:variation]).to eq(0) #modify the flag but dont call update - td.flag('flag').variation_for_all_users(false).build(0) + td.flag('flag').variation_for_all(false).build(0) expect(td.flag('flag').build(0)[:fallthrough][:variation]).to eq(0) end @@ -172,7 +172,7 
@@ module Integrations end it 'can set variation for all users' do - f = TestData::FlagBuilder.new('flag').variation_for_all_users(true).build(1) + f = TestData::FlagBuilder.new('flag').variation_for_all(true).build(1) expect(f[:rules]).to be_nil expect(f[:targets]).to be_nil expect(f[:fallthrough][:variation]).to be(0) @@ -183,7 +183,7 @@ module Integrations .if_match('name', 'ben') .then_return(false) .variation_for_user('ben', false) - .variation_for_all_users(true).build(1) + .variation_for_all(true).build(1) expect(f.keys).to_not include(:rules) expect(f.keys).to_not include(:targets) expect(f[:fallthrough][:variation]).to be(0) @@ -199,10 +199,10 @@ module Integrations end it 'can make an immutable copy of its self' do - fb = TestData::FlagBuilder.new('flag').variation_for_all_users(true) + fb = TestData::FlagBuilder.new('flag').variation_for_all(true) expect(fb.build(0)).to eql(fb.clone.build(0)) - fcopy = fb.clone.variation_for_all_users(false).build(0) + fcopy = fb.clone.variation_for_all(false).build(0) f = fb.build(0) expect(f[:key]).to eql(fcopy[:key]) @@ -221,18 +221,20 @@ module Integrations id: "rule0", variation: 0, clauses: [{ + contextKind: "user", attribute: 'name', op: 'in', values: ['ben'], negate: false, }, { + contextKind: "user", attribute: 'country', op: 'in', values: ['fr'], negate: true, - } - ] + }, + ], }]) end end diff --git a/spec/ldclient_end_to_end_spec.rb b/spec/ldclient_end_to_end_spec.rb index 19c6c241..9f9de608 100644 --- a/spec/ldclient_end_to_end_spec.rb +++ b/spec/ldclient_end_to_end_spec.rb @@ -5,8 +5,8 @@ ALWAYS_TRUE_FLAG = { key: 'flagkey', version: 1, on: false, offVariation: 1, variations: [ false, true ] } DATA_WITH_ALWAYS_TRUE_FLAG = { - flags: { ALWAYS_TRUE_FLAG[:key ].to_sym => ALWAYS_TRUE_FLAG }, - segments: {} + flags: { ALWAYS_TRUE_FLAG[:key ].to_sym => ALWAYS_TRUE_FLAG }, + segments: {}, } PUT_EVENT_WITH_ALWAYS_TRUE_FLAG = "event: put\ndata:{\"data\":#{DATA_WITH_ALWAYS_TRUE_FLAG.to_json}}\n\n'" @@ -18,10 +18,10 @@ module LaunchDarkly it "starts in polling mode" do with_server do |poll_server| poll_server.setup_ok_response("/sdk/latest-all", DATA_WITH_ALWAYS_TRUE_FLAG.to_json, "application/json") - + with_client(test_config(stream: false, data_source: nil, base_uri: poll_server.base_uri.to_s)) do |client| expect(client.initialized?).to be true - expect(client.variation(ALWAYS_TRUE_FLAG[:key], basic_user, false)).to be true + expect(client.variation(ALWAYS_TRUE_FLAG[:key], basic_context, false)).to be true end end end @@ -29,10 +29,10 @@ module LaunchDarkly it "fails in polling mode with 401 error" do with_server do |poll_server| poll_server.setup_status_response("/sdk/latest-all", 401) - + with_client(test_config(stream: false, data_source: nil, base_uri: poll_server.base_uri.to_s)) do |client| expect(client.initialized?).to be false - expect(client.variation(ALWAYS_TRUE_FLAG[:key], basic_user, false)).to be false + expect(client.variation(ALWAYS_TRUE_FLAG[:key], basic_context, false)).to be false end end end @@ -40,14 +40,14 @@ module LaunchDarkly it "sends event without diagnostics" do with_server do |events_server| events_server.setup_ok_response("/bulk", "") - + config = test_config( send_events: true, events_uri: events_server.base_uri.to_s, diagnostic_opt_out: true ) with_client(config) do |client| - client.identify(basic_user) + client.identify(basic_context) client.flush req, body = events_server.await_request_with_body @@ -64,13 +64,13 @@ module LaunchDarkly with_server do |events_server| events_server.setup_ok_response("/bulk", 
"") events_server.setup_ok_response("/diagnostic", "") - + config = test_config( send_events: true, events_uri: events_server.base_uri.to_s ) with_client(config) do |client| - client.identify(basic_user) + client.identify(basic_context) client.flush req0, body0 = events_server.await_request_with_body @@ -90,7 +90,7 @@ module LaunchDarkly with_server do |events_server| events_server.setup_ok_response("/bulk", "") poll_server.setup_ok_response("/sdk/latest-all", '{"flags":{},"segments":{}}', "application/json") - + config = test_config( stream: false, data_source: nil, @@ -100,11 +100,11 @@ module LaunchDarkly diagnostic_opt_out: true, socket_factory: SocketFactoryFromHash.new({ "fake-polling-server" => poll_server.port, - "fake-events-server" => events_server.port + "fake-events-server" => events_server.port, }) ) with_client(config) do |client| - client.identify(basic_user) + client.identify(basic_context) client.flush req, body = events_server.await_request_with_body diff --git a/spec/ldclient_evaluation_spec.rb b/spec/ldclient_evaluation_spec.rb index 581f3256..424276fc 100644 --- a/spec/ldclient_evaluation_spec.rb +++ b/spec/ldclient_evaluation_spec.rb @@ -9,23 +9,23 @@ module LaunchDarkly context "variation" do it "returns the default value if the client is offline" do with_client(test_config(offline: true)) do |offline_client| - result = offline_client.variation("doesntmatter", basic_user, "default") + result = offline_client.variation("doesntmatter", basic_context, "default") expect(result).to eq "default" end end it "returns the default value for an unknown feature" do with_client(test_config) do |client| - expect(client.variation("badkey", basic_user, "default")).to eq "default" + expect(client.variation("badkey", basic_context, "default")).to eq "default" end end it "returns the value for an existing feature" do td = Integrations::TestData.data_source - td.update(td.flag("flagkey").variations("value").variation_for_all_users(0)) - + td.update(td.flag("flagkey").variations("value").variation_for_all(0)) + with_client(test_config(data_source: td)) do |client| - expect(client.variation("flagkey", basic_user, "default")).to eq "value" + expect(client.variation("flagkey", basic_context, "default")).to eq "value" end end @@ -34,17 +34,17 @@ module LaunchDarkly td.use_preconfigured_flag({ # TestData normally won't construct a flag with offVariation: nil key: "flagkey", on: false, - offVariation: nil + offVariation: nil, }) with_client(test_config(data_source: td)) do |client| - expect(client.variation("flagkey", basic_user, "default")).to eq "default" + expect(client.variation("flagkey", basic_context, "default")).to eq "default" end end it "can evaluate a flag that references a segment" do td = Integrations::TestData.data_source - segment = SegmentBuilder.new("segmentkey").included(basic_user[:key]).build + segment = SegmentBuilder.new("segmentkey").included(basic_context.key).build td.use_preconfigured_segment(segment) td.use_preconfigured_flag( FlagBuilder.new("flagkey").on(true).variations(true, false).rule( @@ -52,7 +52,7 @@ module LaunchDarkly ).build) with_client(test_config(data_source: td)) do |client| - expect(client.variation("flagkey", basic_user, false)).to be true + expect(client.variation("flagkey", basic_context, false)).to be true end end @@ -66,11 +66,11 @@ module LaunchDarkly ).build) segstore = MockBigSegmentStore.new - segstore.setup_segment_for_user(basic_user[:key], segment, true) + segstore.setup_segment_for_context(basic_context.key, segment, true) big_seg_config 
= BigSegmentsConfig.new(store: segstore) with_client(test_config(data_source: td, big_segments: big_seg_config)) do |client| - expect(client.variation("flagkey", basic_user, false)).to be true + expect(client.variation("flagkey", basic_context, false)).to be true end end end @@ -81,7 +81,7 @@ module LaunchDarkly it "returns the default value if the client is offline" do with_client(test_config(offline: true)) do |offline_client| - result = offline_client.variation_detail("doesntmatter", basic_user, "default") + result = offline_client.variation_detail("doesntmatter", basic_context, "default") expected = EvaluationDetail.new("default", nil, EvaluationReason::error(EvaluationReason::ERROR_CLIENT_NOT_READY)) expect(result).to eq expected end @@ -89,7 +89,7 @@ module LaunchDarkly it "returns the default value for an unknown feature" do with_client(test_config) do |client| - result = client.variation_detail("badkey", basic_user, "default") + result = client.variation_detail("badkey", basic_context, "default") expected = EvaluationDetail.new("default", nil, EvaluationReason::error(EvaluationReason::ERROR_FLAG_NOT_FOUND)) expect(result).to eq expected end @@ -98,9 +98,9 @@ module LaunchDarkly it "returns a value for an existing feature" do td = Integrations::TestData.data_source td.update(td.flag("flagkey").variations("value").on(false).off_variation(0)) - + with_client(test_config(data_source: td)) do |client| - result = client.variation_detail("flagkey", basic_user, "default") + result = client.variation_detail("flagkey", basic_context, "default") expected = EvaluationDetail.new("value", 0, EvaluationReason::off) expect(result).to eq expected end @@ -111,11 +111,11 @@ module LaunchDarkly td.use_preconfigured_flag({ # TestData normally won't construct a flag with offVariation: nil key: "flagkey", on: false, - offVariation: nil + offVariation: nil, }) with_client(test_config(data_source: td)) do |client| - result = client.variation_detail("flagkey", basic_user, "default") + result = client.variation_detail("flagkey", basic_context, "default") expected = EvaluationDetail.new("default", nil, EvaluationReason::off) expect(result).to eq expected expect(result.default_value?).to be true @@ -132,57 +132,18 @@ module LaunchDarkly ).build) segstore = MockBigSegmentStore.new - segstore.setup_segment_for_user(basic_user[:key], segment, true) + segstore.setup_segment_for_context(basic_context.key, segment, true) segstore.setup_metadata(Time.now) big_seg_config = BigSegmentsConfig.new(store: segstore) with_client(test_config(data_source: td, big_segments: big_seg_config)) do |client| - result = client.variation_detail("flagkey", basic_user, false) + result = client.variation_detail("flagkey", basic_context, false) expect(result.value).to be true expect(result.reason.big_segments_status).to eq(BigSegmentsStatus::HEALTHY) end end end - describe "all_flags" do - let(:flag1) { { key: "key1", offVariation: 0, variations: [ 'value1' ] } } - let(:flag2) { { key: "key2", offVariation: 0, variations: [ 'value2' ] } } - let(:test_data) { - td = Integrations::TestData.data_source - td.use_preconfigured_flag(flag1) - td.use_preconfigured_flag(flag2) - td - } - - it "returns flag values" do - with_client(test_config(data_source: test_data)) do |client| - result = client.all_flags({ key: 'userkey' }) - expect(result).to eq({ 'key1' => 'value1', 'key2' => 'value2' }) - end - end - - it "returns empty map for nil user" do - with_client(test_config(data_source: test_data)) do |client| - result = client.all_flags(nil) - 
expect(result).to eq({}) - end - end - - it "returns empty map for nil user key" do - with_client(test_config(data_source: test_data)) do |client| - result = client.all_flags({}) - expect(result).to eq({}) - end - end - - it "returns empty map if offline" do - with_client(test_config(data_source: test_data, offline: true)) do |offline_client| - result = offline_client.all_flags(nil) - expect(result).to eq({}) - end - end - end - context "all_flags_state" do let(:flag1) { { key: "key1", version: 100, offVariation: 0, variations: [ 'value1' ], trackEvents: false } } let(:flag2) { { key: "key2", version: 200, offVariation: 1, variations: [ 'x', 'value2' ], trackEvents: true, debugEventsUntilDate: 1000 } } @@ -201,7 +162,7 @@ module LaunchDarkly values = state.values_map expect(values).to eq({ 'key1' => 'value1', 'key2' => 'value2' }) - + result = state.as_json expect(result).to eq({ 'key1' => 'value1', @@ -209,16 +170,16 @@ module LaunchDarkly '$flagsState' => { 'key1' => { :variation => 0, - :version => 100 + :version => 100, }, 'key2' => { :variation => 1, :version => 200, :trackEvents => true, - :debugEventsUntilDate => 1000 - } + :debugEventsUntilDate => 1000, + }, }, - '$valid' => true + '$valid' => true, }) end end @@ -252,7 +213,7 @@ module LaunchDarkly values = state.values_map expect(values).to eq({ 'key1' => 'value1', 'key2' => 'value2', 'key3' => 'value3' }) - + result = state.as_json expect(result).to eq({ 'key1' => 'value1', @@ -260,25 +221,25 @@ module LaunchDarkly 'key3' => 'value3', '$flagsState' => { 'key1' => { - :variation => 0 + :variation => 0, }, 'key2' => { :variation => 1, :version => 200, - :trackEvents => true + :trackEvents => true, }, 'key3' => { :variation => 1, :version => 300, - :debugEventsUntilDate => future_time - } + :debugEventsUntilDate => future_time, + }, }, - '$valid' => true + '$valid' => true, }) end end - it "returns empty state for nil user" do + it "returns empty state for nil context" do with_client(test_config(data_source: test_data)) do |client| state = client.all_flags_state(nil) expect(state.valid?).to be false @@ -286,7 +247,7 @@ module LaunchDarkly end end - it "returns empty state for nil user key" do + it "returns empty state for nil context key" do with_client(test_config(data_source: test_data)) do |client| state = client.all_flags_state({}) expect(state.valid?).to be false diff --git a/spec/ldclient_events_spec.rb b/spec/ldclient_events_spec.rb index ba82617b..62adda39 100644 --- a/spec/ldclient_events_spec.rb +++ b/spec/ldclient_events_spec.rb @@ -16,35 +16,37 @@ def event_processor(client) expect(event_processor(client)).to be_a(LaunchDarkly::NullEventProcessor) end end - + context "evaluation events - variation" do it "unknown flag" do with_client(test_config) do |client| + context = basic_context expect(event_processor(client)).to receive(:record_eval_event).with( - basic_user, 'badkey', nil, nil, 'default', nil, 'default', false, nil, nil + context, 'badkey', nil, nil, 'default', nil, 'default', false, nil, nil ) - client.variation("badkey", basic_user, "default") + client.variation("badkey", context, "default") end end it "known flag" do td = Integrations::TestData.data_source - td.update(td.flag("flagkey").variations("value").variation_for_all_users(0)) - + td.update(td.flag("flagkey").variations("value").variation_for_all(0)) + + context = basic_context with_client(test_config(data_source: td)) do |client| expect(event_processor(client)).to receive(:record_eval_event).with( - basic_user, 'flagkey', 1, 0, 'value', nil, 'default', 
false, nil, nil + context, 'flagkey', 1, 0, 'value', nil, 'default', false, nil, nil ) - client.variation("flagkey", basic_user, "default") + client.variation("flagkey", context, "default") end end - it "does not send event, and logs error, if user is nil" do + it "does not send event, and logs error, if context is nil" do td = Integrations::TestData.data_source - td.update(td.flag("flagkey").variations("value").variation_for_all_users(0)) + td.update(td.flag("flagkey").variations("value").variation_for_all(0)) logger = double().as_null_object - + with_client(test_config(data_source: td, logger: logger)) do |client| expect(event_processor(client)).not_to receive(:record_eval_event) expect(logger).to receive(:error) @@ -52,16 +54,16 @@ def event_processor(client) end end - it "does not send event, and logs warning, if user key is nil" do + it "does not send event, and logs error, if context key is nil" do td = Integrations::TestData.data_source - td.update(td.flag("flagkey").variations("value").variation_for_all_users(0)) + td.update(td.flag("flagkey").variations("value").variation_for_all(0)) logger = double().as_null_object keyless_user = { key: nil } with_client(test_config(data_source: td, logger: logger)) do |client| expect(event_processor(client)).not_to receive(:record_eval_event) - expect(logger).to receive(:warn) + expect(logger).to receive(:error) client.variation("flagkey", keyless_user, "default") end end @@ -69,34 +71,36 @@ def event_processor(client) it "sets trackEvents and reason if trackEvents is set for matched rule" do td = Integrations::TestData.data_source td.use_preconfigured_flag( - FlagBuilder.new("flagkey").version(100).on(true).variations("value"). - rule(RuleBuilder.new.variation(0).id("id").track_events(true). - clause(Clauses.match_user(basic_user))). - build + FlagBuilder.new("flagkey").version(100).on(true).variations("value") + .rule(RuleBuilder.new.variation(0).id("id").track_events(true) + .clause(Clauses.match_context(basic_context))) + .build ) + context = basic_context with_client(test_config(data_source: td)) do |client| expect(event_processor(client)).to receive(:record_eval_event).with( - basic_user, 'flagkey', 100, 0, 'value', LaunchDarkly::EvaluationReason::rule_match(0, 'id'), + context, 'flagkey', 100, 0, 'value', LaunchDarkly::EvaluationReason::rule_match(0, 'id'), 'default', true, nil, nil ) - client.variation("flagkey", basic_user, "default") + client.variation("flagkey", context, "default") end end it "sets trackEvents and reason if trackEventsFallthrough is set and we fell through" do td = Integrations::TestData.data_source td.use_preconfigured_flag( - FlagBuilder.new("flagkey").version(100).on(true).variations("value").fallthrough_variation(0). 
- track_events_fallthrough(true).build + FlagBuilder.new("flagkey").version(100).on(true).variations("value").fallthrough_variation(0) + .track_events_fallthrough(true).build ) + context = basic_context with_client(test_config(data_source: td)) do |client| expect(event_processor(client)).to receive(:record_eval_event).with( - basic_user, 'flagkey', 100, 0, 'value', LaunchDarkly::EvaluationReason::fallthrough, + context, 'flagkey', 100, 0, 'value', LaunchDarkly::EvaluationReason::fallthrough, 'default', true, nil, nil ) - client.variation("flagkey", basic_user, "default") + client.variation("flagkey", context, "default") end end end @@ -104,12 +108,13 @@ def event_processor(client) context "evaluation events - variation_detail" do it "unknown flag" do with_client(test_config) do |client| + context = basic_context expect(event_processor(client)).to receive(:record_eval_event).with( - basic_user, 'badkey', nil, nil, 'default', + context, 'badkey', nil, nil, 'default', LaunchDarkly::EvaluationReason::error(LaunchDarkly::EvaluationReason::ERROR_FLAG_NOT_FOUND), 'default', false, nil, nil ) - client.variation_detail("badkey", basic_user, "default") + client.variation_detail("badkey", context, "default") end end @@ -117,16 +122,17 @@ def event_processor(client) td = Integrations::TestData.data_source td.update(td.flag("flagkey").variations("value").on(false).off_variation(0)) + context = basic_context with_client(test_config(data_source: td)) do |client| expect(event_processor(client)).to receive(:record_eval_event).with( - basic_user, 'flagkey', 1, 0, 'value', LaunchDarkly::EvaluationReason::off, + context, 'flagkey', 1, 0, 'value', LaunchDarkly::EvaluationReason::off, 'default', false, nil, nil ) - client.variation_detail("flagkey", basic_user, "default") + client.variation_detail("flagkey", context, "default") end end - it "does not send event, and logs error, if user is nil" do + it "does not send event, and logs error, if context is nil" do td = Integrations::TestData.data_source td.update(td.flag("flagkey").variations("value").on(false).off_variation(0)) @@ -139,7 +145,7 @@ def event_processor(client) end end - it "does not send event, and logs warning, if user key is nil" do + it "does not send event, and logs warning, if context key is nil" do td = Integrations::TestData.data_source td.update(td.flag("flagkey").variations("value").on(false).off_variation(0)) @@ -147,21 +153,22 @@ def event_processor(client) with_client(test_config(data_source: td, logger: logger)) do |client| expect(event_processor(client)).not_to receive(:record_eval_event) - expect(logger).to receive(:warn) + expect(logger).to receive(:error) client.variation_detail("flagkey", { key: nil }, "default") end end end - context "identify" do + context "identify" do it "queues up an identify event" do + context = basic_context with_client(test_config) do |client| - expect(event_processor(client)).to receive(:record_identify_event).with(basic_user) - client.identify(basic_user) + expect(event_processor(client)).to receive(:record_identify_event).with(context) + client.identify(context) end end - it "does not send event, and logs warning, if user is nil" do + it "does not send event, and logs warning, if context is nil" do logger = double().as_null_object with_client(test_config(logger: logger)) do |client| @@ -171,9 +178,9 @@ def event_processor(client) end end - it "does not send event, and logs warning, if user key is blank" do + it "does not send event, and logs warning, if context key is blank" do logger = 
double().as_null_object - + with_client(test_config(logger: logger)) do |client| expect(event_processor(client)).not_to receive(:record_identify_event) expect(logger).to receive(:warn) @@ -182,38 +189,28 @@ def event_processor(client) end end - context "track" do + context "track" do it "queues up an custom event" do + context = basic_context with_client(test_config) do |client| expect(event_processor(client)).to receive(:record_custom_event).with( - basic_user, 'custom_event_name', 42, nil + context, 'custom_event_name', 42, nil ) - client.track("custom_event_name", basic_user, 42) + client.track("custom_event_name", context, 42) end end it "can include a metric value" do + context = basic_context with_client(test_config) do |client| expect(event_processor(client)).to receive(:record_custom_event).with( - basic_user, 'custom_event_name', nil, 1.5 + context, 'custom_event_name', nil, 1.5 ) - client.track("custom_event_name", basic_user, nil, 1.5) + client.track("custom_event_name", context, nil, 1.5) end end - it "sanitizes the user in the event" do - numeric_key_user = { key: 33 } - sanitized_user = { key: "33" } - - with_client(test_config) do |client| - expect(event_processor(client)).to receive(:record_custom_event).with( - sanitized_user, 'custom_event_name', nil, nil - ) - client.track("custom_event_name", numeric_key_user, nil) - end - end - - it "does not send event, and logs a warning, if user is nil" do + it "does not send event, and logs a warning, if context is nil" do logger = double().as_null_object with_client(test_config(logger: logger)) do |client| @@ -223,7 +220,7 @@ def event_processor(client) end end - it "does not send event, and logs warning, if user key is nil" do + it "does not send event, and logs warning, if context key is nil" do logger = double().as_null_object with_client(test_config(logger: logger)) do |client| @@ -233,36 +230,5 @@ def event_processor(client) end end end - - context "alias" do - it "queues up an alias event" do - anon_user = { key: "user-key", anonymous: true } - - with_client(test_config) do |client| - expect(event_processor(client)).to receive(:record_alias_event).with(basic_user, anon_user) - client.alias(basic_user, anon_user) - end - end - - it "does not send event, and logs warning, if user is nil" do - logger = double().as_null_object - - with_client(test_config(logger: logger)) do |client| - expect(event_processor(client)).not_to receive(:record_alias_event) - expect(logger).to receive(:warn) - client.alias(nil, nil) - end - end - - it "does not send event, and logs warning, if user key is nil" do - logger = double().as_null_object - - with_client(test_config(logger: logger)) do |client| - expect(event_processor(client)).not_to receive(:record_alias_event) - expect(logger).to receive(:warn) - client.alias({ key: nil }, { key: nil }) - end - end - end end end diff --git a/spec/ldclient_spec.rb b/spec/ldclient_spec.rb index ef689deb..ad56b800 100644 --- a/spec/ldclient_spec.rb +++ b/spec/ldclient_spec.rb @@ -63,11 +63,11 @@ module LaunchDarkly c: { key: "c" }, d: { key: "d" }, e: { key: "e" }, - f: { key: "f" } + f: { key: "f" }, }, SEGMENTS => { - o: { key: "o" } - } + o: { key: "o" }, + }, } } @@ -76,12 +76,12 @@ module LaunchDarkly td = Integrations::TestData.data_source dependency_ordering_test_data[FEATURES].each { |key, flag| td.use_preconfigured_flag(flag) } dependency_ordering_test_data[SEGMENTS].each { |key, segment| td.use_preconfigured_segment(segment) } - + with_client(test_config(feature_store: store, data_source: td)) do 
|client| data = store.received_data expect(data).not_to be_nil expect(data.count).to eq(2) - + # Segments should always come first expect(data.keys[0]).to be(SEGMENTS) expect(data.values[0].count).to eq(dependency_ordering_test_data[SEGMENTS].count) diff --git a/spec/mock_components.rb b/spec/mock_components.rb index 07dd851a..38b1afcb 100644 --- a/spec/mock_components.rb +++ b/spec/mock_components.rb @@ -20,7 +20,7 @@ def base_config { data_source: null_data, send_events: false, - logger: null_logger + logger: null_logger, } end @@ -34,8 +34,8 @@ def with_client(config) end end -def basic_user - { "key": "user-key" } +def basic_context + LaunchDarkly::LDContext::create({ "key": "user-key" }) end module LaunchDarkly @@ -58,12 +58,12 @@ def initialize end def get_metadata - raise @metadata_error if !@metadata_error.nil? + raise @metadata_error unless @metadata_error.nil? @metadata end - def get_membership(user_hash) - @memberships[user_hash] + def get_membership(context_hash) + @memberships[context_hash] end def stop @@ -77,13 +77,8 @@ def setup_metadata_error(ex) @metadata_error = ex end - def setup_membership(user_key, membership) - user_hash = Impl::BigSegmentStoreManager.hash_for_user_key(user_key) - @memberships[user_hash] = membership - end - - def setup_segment_for_user(user_key, segment, included) - user_hash = Impl::BigSegmentStoreManager.hash_for_user_key(user_key) + def setup_segment_for_context(user_key, segment, included) + user_hash = Impl::BigSegmentStoreManager.hash_for_context_key(user_key) @memberships[user_hash] ||= {} @memberships[user_hash][Impl::Evaluator.make_big_segment_ref(segment)] = included end diff --git a/spec/model_builders.rb b/spec/model_builders.rb index 366155da..110e184e 100644 --- a/spec/model_builders.rb +++ b/spec/model_builders.rb @@ -1,43 +1,45 @@ -require "ldclient-rb/impl/model/preprocessed_data" +require "ldclient-rb/impl/model/feature_flag" +require "ldclient-rb/impl/model/segment" require "json" -def clone_json_object(o) - JSON.parse(o.to_json, symbolize_names: true) -end - -class DataItemFactory - def initialize(with_preprocessing) - @with_preprocessing = with_preprocessing - end - - def flag(flag_data) - @with_preprocessing ? preprocessed_flag(flag_data) : flag_data +class Flags + def self.from_hash(data) + LaunchDarkly::Impl::Model.deserialize(LaunchDarkly::FEATURES, data) end - def segment(segment_data) - @with_preprocessing ? preprocessed_segment(segment_data) : segment_data + def self.boolean_flag_with_rules(*rules) + builder = FlagBuilder.new("feature").on(true).variations(false, true).fallthrough_variation(0) + rules.each { |r| builder.rule(r) } + builder.build end - def boolean_flag_with_rules(rules) - flag({ key: 'feature', on: true, rules: rules, fallthrough: { variation: 0 }, variations: [ false, true ] }) + def self.boolean_flag_with_clauses(*clauses) + self.boolean_flag_with_rules({ id: 'ruleid', clauses: clauses, variation: 1 }) end +end - def boolean_flag_with_clauses(clauses) - flag(boolean_flag_with_rules([{ id: 'ruleid', clauses: clauses, variation: 1 }])) +class Segments + def self.from_hash(data) + LaunchDarkly::Impl::Model.deserialize(LaunchDarkly::SEGMENTS, data) end +end - attr_reader :with_preprocessing - - private def preprocessed_flag(o) - ret = clone_json_object(o) - LaunchDarkly::Impl::DataModelPreprocessing::Preprocessor.new().preprocess_flag!(ret) - ret +class Clauses + def self.match_segment(segment) + { + "attribute": "", + "op": "segmentMatch", + "values": [ segment.is_a?(String) ? 
segment : segment[:key] ], + } end - private def preprocessed_segment(o) - ret = clone_json_object(o) - LaunchDarkly::Impl::DataModelPreprocessing::Preprocessor.new().preprocess_segment!(ret) - ret + def self.match_context(context, attr = :key) + { + "attribute": attr.to_s, + "op": "in", + "values": [ context.get_value(attr) ], + "contextKind": context.individual_context(0).kind, + } end end @@ -47,12 +49,12 @@ def initialize(key) key: key, version: 1, variations: [ false ], - rules: [] + rules: [], } end def build - DataItemFactory.new(true).flag(@flag) + Flags.from_hash(@flag) end def version(value) @@ -69,9 +71,9 @@ def on(value) @flag[:on] = value self end - + def rule(r) - @flag[:rules].append(r.build) + @flag[:rules].append(r.is_a?(RuleBuilder) ? r.build : r) self end @@ -113,7 +115,7 @@ def initialize() @rule = { id: "", variation: 0, - clauses: [] + clauses: [], } end @@ -142,30 +144,70 @@ def track_events(value) end end +class SegmentRuleBuilder + def initialize() + @rule = { + clauses: [], + } + end + + def build + @rule.clone + end + + def clause(c) + @rule[:clauses].append(c) + self + end +end + class SegmentBuilder def initialize(key) @segment = { key: key, version: 1, - included: [], - excluded: [] + included: [], + excluded: [], + includedContexts: [], + excludedContexts: [], + rules: [], } end def build - DataItemFactory.new(true).segment(@segment) + Segments.from_hash(@segment) + end + + def version(value) + @segment[:version] = value + self end - + def included(*keys) @segment[:included] = keys self end + def included_contexts(kind, *keys) + @segment[:includedContexts].append({ contextKind: kind, values: keys }) + self + end + + def excluded_contexts(kind, *keys) + @segment[:excludedContexts].append({ contextKind: kind, values: keys }) + self + end + def excluded(*keys) @segment[:excluded] = keys self end + def rule(r) + @segment[:rules].append(r.is_a?(SegmentRuleBuilder) ? r.build : r) + self + end + def unbounded(value) @segment[:unbounded] = value self @@ -177,20 +219,39 @@ def generation(value) end end -class Clauses - def self.match_segment(segment) +class DataSetBuilder + def initialize + @flags = {} + @segments = {} + end + + def flag(data) + f = LaunchDarkly::Impl::Model.deserialize(LaunchDarkly::FEATURES, data) + @flags[f.key.to_sym] = f + self + end + + def segment(data) + s = LaunchDarkly::Impl::Model.deserialize(LaunchDarkly::SEGMENTS, data) + @segments[s.key.to_sym] = s + self + end + + def to_store_data { - "attribute": "", - "op": "segmentMatch", - "values": [ segment.is_a?(Hash) ? 
segment[:key] : segment ] + LaunchDarkly::FEATURES => @flags, + LaunchDarkly::SEGMENTS => @segments, } end - def self.match_user(user) + def to_hash { - "attribute": "key", - "op": "in", - "values": [ user[:key] ] + flags: @flags, + segments: @segments, } end + + def to_json(*) + to_hash.to_json + end end diff --git a/spec/newrelic_spec.rb b/spec/newrelic_spec.rb deleted file mode 100644 index f20afa42..00000000 --- a/spec/newrelic_spec.rb +++ /dev/null @@ -1,5 +0,0 @@ -require "spec_helper" - -describe LaunchDarkly::LDNewRelic do - subject { LaunchDarkly::LDNewRelic } -end diff --git a/spec/polling_spec.rb b/spec/polling_spec.rb index ca36364c..c8f801c2 100644 --- a/spec/polling_spec.rb +++ b/spec/polling_spec.rb @@ -20,11 +20,11 @@ def with_processor(store) segment = { key: 'segkey', version: 1 } all_data = { LaunchDarkly::FEATURES => { - flagkey: flag + flagkey: flag, }, LaunchDarkly::SEGMENTS => { - segkey: segment - } + segkey: segment, + }, } it 'puts feature data in store' do diff --git a/spec/reference_spec.rb b/spec/reference_spec.rb new file mode 100644 index 00000000..38b5e403 --- /dev/null +++ b/spec/reference_spec.rb @@ -0,0 +1,110 @@ +require "ldclient-rb/reference" + +describe LaunchDarkly::Reference do + subject { LaunchDarkly::Reference } + + it "determines invalid formats" do + [ + # Empty reference failures + [nil, 'empty reference'], + ["", 'empty reference'], + ["/", 'empty reference'], + + # Double or trailing slashes + ["//", 'double or trailing slash'], + ["/a//b", 'double or trailing slash'], + ["/a/b/", 'double or trailing slash'], + + # Invalid escape sequence + ["/a~x", 'invalid escape sequence'], + ["/a~", 'invalid escape sequence'], + ["/a/b~x", 'invalid escape sequence'], + ["/a/b~", 'invalid escape sequence'], + + ].each do |(path, msg)| + ref = subject.create(path) + expect(ref.raw_path).to eq(path) + expect(ref.error).to eq(msg) + end + end + + describe "can handle valid formats" do + it "can process references without a leading slash" do + %w[key kind name name/with/slashes name~0~1with-what-looks-like-escape-sequences].each do |path| + ref = subject.create(path) + + expect(ref.raw_path).to eq(path) + expect(ref.error).to be_nil + expect(ref.depth).to eq(1) + end + end + + it "can handle simple references with a leading slash" do + [ + ["/key", :key], + ["/0", :"0"], + ["/name~1with~1slashes~0and~0tildes", :"name/with/slashes~and~tildes"], + ].each do |(path, component)| + ref = subject.create(path) + + expect(ref.raw_path).to eq(path) + expect(ref.error).to be_nil + expect(ref.depth).to eq(1) + expect(ref.component(0)).to eq(component) + end + end + + it "can access sub-components of varying depths" do + [ + ["key", 1, 0, :key], + ["/key", 1, 0, :key], + + ["/a/b", 2, 0, :a], + ["/a/b", 2, 1, :b], + + ["/a~1b/c", 2, 0, :"a/b"], + ["/a~0b/c", 2, 0, :"a~b"], + + ["/a/10/20/30x", 4, 1, :"10"], + ["/a/10/20/30x", 4, 2, :"20"], + ["/a/10/20/30x", 4, 3, :"30x"], + + # invalid arguments don't cause an error, they just return nil + ["", 0, 0, nil], + ["", 0, -1, nil], + + ["key", 1, -1, nil], + ["key", 1, 1, nil], + + ["/key", 1, -1, nil], + ["/key", 1, 1, nil], + + ["/a/b", 2, -1, nil], + ["/a/b", 2, 2, nil], + ].each do |(path, depth, index, component)| + ref = subject.create(path) + expect(ref.depth).to eq(depth) + expect(ref.component(index)).to eq(component) + end + end + end + + describe "creating literal references" do + it "can create valid references" do + [ + ["name", "name"], + ["a/b", "a/b"], + ["/a/b~c", "/~1a~1b~0c"], + ["/", "/~1"], + ].each do 
|(literal, path)| + expect(subject.create_literal(literal).raw_path).to eq(subject.create(path).raw_path) + end + end + + it("can detect invalid references") do + [nil, "", true].each do |value| + expect(subject.create_literal(value).error).to eq('empty reference') + end + end + end +end diff --git a/spec/requestor_spec.rb b/spec/requestor_spec.rb index f9f40fa0..7fea7733 100644 --- a/spec/requestor_spec.rb +++ b/spec/requestor_spec.rb @@ -5,8 +5,6 @@ $sdk_key = "secret" describe LaunchDarkly::Requestor do - factory = DataItemFactory.new(true) # true = enable the usual preprocessing logic - def with_requestor(base_uri, opts = {}) r = LaunchDarkly::Requestor.new($sdk_key, LaunchDarkly::Config.new({ base_uri: base_uri, application: {id: "id", version: "version"} }.merge(opts))) begin @@ -34,12 +32,12 @@ def with_requestor(base_uri, opts = {}) end it "parses response" do - expected_data = { flags: { x: factory.flag({ key: "x" }) } } + expected_data = DataSetBuilder.new.flag(FlagBuilder.new("x").build) with_server do |server| with_requestor(server.base_uri.to_s) do |requestor| server.setup_ok_response("/", expected_data.to_json) data = requestor.request_all_data() - expect(data).to eq LaunchDarkly::Impl::Model.make_all_store_data(expected_data) + expect(data).to eq expected_data.to_store_data end end end @@ -49,10 +47,10 @@ def with_requestor(base_uri, opts = {}) logger.level = ::Logger::DEBUG with_server do |server| with_requestor(server.base_uri.to_s, { logger: logger }) do |requestor| - server.setup_ok_response("/", { flags: { x: { key: "y" } } }.to_json) + server.setup_ok_response("/", FlagBuilder.new("x").build.to_json) expect do requestor.request_all_data() - end.to output(/\[LDClient\] Got response from uri\:/).to_stdout_from_any_process + end.to output(/\[LDClient\] Got response from uri:/).to_stdout_from_any_process end end end @@ -83,7 +81,7 @@ def with_requestor(base_uri, opts = {}) requestor.request_all_data() expect(server.requests.count).to eq 1 expect(server.requests[0].header).to include({ - "x-launchdarkly-wrapper" => [ "MyWrapper/1.0" ] + "x-launchdarkly-wrapper" => [ "MyWrapper/1.0" ], }) end end @@ -91,7 +89,7 @@ def with_requestor(base_uri, opts = {}) it "can reuse cached data" do etag = "xyz" - expected_data = { flags: { x: factory.flag({ key: "x" }) } } + expected_data = DataSetBuilder.new.flag(FlagBuilder.new("x").build) with_server do |server| with_requestor(server.base_uri.to_s) do |requestor| server.setup_response("/") do |req, res| @@ -108,7 +106,7 @@ def with_requestor(base_uri, opts = {}) data = requestor.request_all_data() expect(server.requests.count).to eq 2 expect(server.requests[1].header).to include({ "if-none-match" => [ etag ] }) - expect(data).to eq LaunchDarkly::Impl::Model.make_all_store_data(expected_data) + expect(data).to eq expected_data.to_store_data end end end @@ -116,8 +114,8 @@ def with_requestor(base_uri, opts = {}) it "replaces cached data with new data" do etag1 = "abc" etag2 = "xyz" - expected_data1 = { flags: { x: factory.flag({ key: "x" }) } } - expected_data2 = { flags: { y: factory.flag({ key: "y" }) } } + expected_data1 = DataSetBuilder.new.flag(FlagBuilder.new("x").build) + expected_data2 = DataSetBuilder.new.flag(FlagBuilder.new("y").build) with_server do |server| with_requestor(server.base_uri.to_s) do |requestor| server.setup_response("/") do |req, res| @@ -126,14 +124,14 @@ def with_requestor(base_uri, opts = {}) res["ETag"] = etag1 end data = requestor.request_all_data() - expect(data).to eq 
LaunchDarkly::Impl::Model.make_all_store_data(expected_data1) + expect(data).to eq expected_data1.to_store_data expect(server.requests.count).to eq 1 server.setup_response("/") do |req, res| res.status = 304 end data = requestor.request_all_data() - expect(data).to eq LaunchDarkly::Impl::Model.make_all_store_data(expected_data1) + expect(data).to eq expected_data1.to_store_data expect(server.requests.count).to eq 2 expect(server.requests[1].header).to include({ "if-none-match" => [ etag1 ] }) @@ -143,7 +141,7 @@ def with_requestor(base_uri, opts = {}) res["ETag"] = etag2 end data = requestor.request_all_data() - expect(data).to eq LaunchDarkly::Impl::Model.make_all_store_data(expected_data2) + expect(data).to eq expected_data2.to_store_data expect(server.requests.count).to eq 3 expect(server.requests[2].header).to include({ "if-none-match" => [ etag1 ] }) @@ -151,7 +149,7 @@ def with_requestor(base_uri, opts = {}) res.status = 304 end data = requestor.request_all_data() - expect(data).to eq LaunchDarkly::Impl::Model.make_all_store_data(expected_data2) + expect(data).to eq expected_data2.to_store_data expect(server.requests.count).to eq 4 expect(server.requests[3].header).to include({ "if-none-match" => [ etag2 ] }) end @@ -159,24 +157,24 @@ def with_requestor(base_uri, opts = {}) end it "uses UTF-8 encoding by default" do - content = '{"flags": {"flagkey": {"key": "flagkey", "variations": ["blue", "grėeń"]}}}' + expected_data = DataSetBuilder.new.flag(FlagBuilder.new("flagkey").variations("blue", "grėeń").build) with_server do |server| - server.setup_ok_response("/sdk/latest-all", content, "application/json") + server.setup_ok_response("/sdk/latest-all", expected_data.to_json, "application/json") with_requestor(server.base_uri.to_s) do |requestor| data = requestor.request_all_data - expect(data).to eq(LaunchDarkly::Impl::Model.make_all_store_data(JSON.parse(content, symbolize_names: true))) + expect(data).to eq expected_data.to_store_data end end end it "detects other encodings from Content-Type" do - content = '{"flags": {"flagkey": {"key": "flagkey", "variations": ["proszę", "dziękuję"]}}}' + expected_data = DataSetBuilder.new.flag(FlagBuilder.new("flagkey").variations("proszę", "dziękuję").build) with_server do |server| - server.setup_ok_response("/sdk/latest-all", content.encode(Encoding::ISO_8859_2), + server.setup_ok_response("/sdk/latest-all", expected_data.to_json.encode(Encoding::ISO_8859_2), "text/plain; charset=ISO-8859-2") with_requestor(server.base_uri.to_s) do |requestor| data = requestor.request_all_data - expect(data).to eq(LaunchDarkly::Impl::Model.make_all_store_data(JSON.parse(content, symbolize_names: true))) + expect(data).to eq expected_data.to_store_data end end end @@ -200,14 +198,14 @@ def with_requestor(base_uri, opts = {}) # use a real proxy that really forwards requests to another test server, because # that test server would be at localhost, and proxy environment variables are # ignored if the target is localhost. 
- expected_data = { flags: { flagkey: factory.flag({ key: "flagkey" }) } } + expected_data = DataSetBuilder.new.flag(FlagBuilder.new("x").build) with_server do |proxy| proxy.setup_ok_response("/sdk/latest-all", expected_data.to_json, "application/json", { "etag" => "x" }) begin ENV["http_proxy"] = proxy.base_uri.to_s with_requestor(fake_target_uri) do |requestor| data = requestor.request_all_data - expect(data).to eq(LaunchDarkly::Impl::Model.make_all_store_data(expected_data)) + expect(data).to eq expected_data.to_store_data end ensure ENV["http_proxy"] = nil diff --git a/spec/segment_store_spec_base.rb b/spec/segment_store_spec_base.rb index 02ecd448..c3ddf82a 100644 --- a/spec/segment_store_spec_base.rb +++ b/spec/segment_store_spec_base.rb @@ -7,7 +7,7 @@ key: "test-segment", version: 11, salt: "718ea30a918a4eba8734b57ab1a93227", - rules: [] + rules: [], } } let(:key0) { segment0[:key].to_sym } diff --git a/spec/stream_spec.rb b/spec/stream_spec.rb index 4f2d7b85..cb89830a 100644 --- a/spec/stream_spec.rb +++ b/spec/stream_spec.rb @@ -3,8 +3,6 @@ require "spec_helper" describe LaunchDarkly::StreamProcessor do - factory = DataItemFactory.new(true) # true = enable the usual preprocessing logic - subject { LaunchDarkly::StreamProcessor } let(:config) { LaunchDarkly::Config.new } let(:processor) { subject.new("sdk_key", config) } @@ -18,16 +16,16 @@ it "will accept PUT methods" do processor.send(:process_message, put_message) - expect(config.feature_store.get(LaunchDarkly::FEATURES, "asdf")).to eq(factory.flag(key: "asdf")) - expect(config.feature_store.get(LaunchDarkly::SEGMENTS, "segkey")).to eq(factory.segment(key: "segkey")) + expect(config.feature_store.get(LaunchDarkly::FEATURES, "asdf")).to eq(Flags.from_hash(key: "asdf")) + expect(config.feature_store.get(LaunchDarkly::SEGMENTS, "segkey")).to eq(Segments.from_hash(key: "segkey")) end it "will accept PATCH methods for flags" do processor.send(:process_message, patch_flag_message) - expect(config.feature_store.get(LaunchDarkly::FEATURES, "asdf")).to eq(factory.flag(key: "asdf", version: 1)) + expect(config.feature_store.get(LaunchDarkly::FEATURES, "asdf")).to eq(Flags.from_hash(key: "asdf", version: 1)) end it "will accept PATCH methods for segments" do processor.send(:process_message, patch_seg_message) - expect(config.feature_store.get(LaunchDarkly::SEGMENTS, "asdf")).to eq(factory.segment(key: "asdf", version: 1)) + expect(config.feature_store.get(LaunchDarkly::SEGMENTS, "asdf")).to eq(Segments.from_hash(key: "asdf", version: 1)) end it "will accept DELETE methods for flags" do processor.send(:process_message, patch_flag_message) diff --git a/spec/user_filter_spec.rb b/spec/user_filter_spec.rb deleted file mode 100644 index 96814289..00000000 --- a/spec/user_filter_spec.rb +++ /dev/null @@ -1,91 +0,0 @@ -require "spec_helper" - -describe LaunchDarkly::UserFilter do - subject { LaunchDarkly::UserFilter } - - let(:base_config) { LaunchDarkly::Config.new } - let(:config_with_all_attrs_private) { LaunchDarkly::Config.new({ all_attributes_private: true })} - let(:config_with_some_attrs_private) { LaunchDarkly::Config.new({ private_attribute_names: ['firstName', 'bizzle'] })} - - # users to serialize - - let(:user) { - { key: 'abc', firstName: 'Sue', custom: { bizzle: 'def', dizzle: 'ghi' }} - } - - let(:user_specifying_own_private_attr) { - u = user.clone - u[:privateAttributeNames] = [ 'dizzle', 'unused' ] - u - } - - let(:user_with_unknown_top_level_attrs) { - { key: 'abc', firstName: 'Sue', species: 'human', hatSize: 6, custom: { 
bizzle: 'def', dizzle: 'ghi' }} - } - - let(:anon_user) { - { key: 'abc', anonymous: 'true', custom: { bizzle: 'def', dizzle: 'ghi' }} - } - - # expected results from serializing user - - let(:user_with_all_attrs_hidden) { - { key: 'abc', custom: { }, privateAttrs: [ 'bizzle', 'dizzle', 'firstName' ]} - } - - let(:user_with_some_attrs_hidden) { - { key: 'abc', custom: { dizzle: 'ghi' }, privateAttrs: [ 'bizzle', 'firstName' ]} - } - - let(:user_with_own_specified_attr_hidden) { - { key: 'abc', firstName: 'Sue', custom: { bizzle: 'def' }, privateAttrs: [ 'dizzle' ]} - } - - let(:anon_user_with_all_attrs_hidden) { - { key: 'abc', anonymous: 'true', custom: { }, privateAttrs: [ 'bizzle', 'dizzle' ]} - } - - describe "serialize_events" do - it "includes all user attributes by default" do - uf = LaunchDarkly::UserFilter.new(base_config) - result = uf.transform_user_props(user) - expect(result).to eq user - end - - it "hides all except key if all_attributes_private is true" do - uf = LaunchDarkly::UserFilter.new(config_with_all_attrs_private) - result = uf.transform_user_props(user) - expect(result).to eq user_with_all_attrs_hidden - end - - it "hides some attributes if private_attribute_names is set" do - uf = LaunchDarkly::UserFilter.new(config_with_some_attrs_private) - result = uf.transform_user_props(user) - expect(result).to eq user_with_some_attrs_hidden - end - - it "hides attributes specified in per-user privateAttrs" do - uf = LaunchDarkly::UserFilter.new(base_config) - result = uf.transform_user_props(user_specifying_own_private_attr) - expect(result).to eq user_with_own_specified_attr_hidden - end - - it "looks at both per-user privateAttrs and global config" do - uf = LaunchDarkly::UserFilter.new(config_with_some_attrs_private) - result = uf.transform_user_props(user_specifying_own_private_attr) - expect(result).to eq user_with_all_attrs_hidden - end - - it "strips out any unknown top-level attributes" do - uf = LaunchDarkly::UserFilter.new(base_config) - result = uf.transform_user_props(user_with_unknown_top_level_attrs) - expect(result).to eq user - end - - it "leaves the anonymous attribute as is" do - uf = LaunchDarkly::UserFilter.new(config_with_all_attrs_private) - result = uf.transform_user_props(anon_user) - expect(result).to eq anon_user_with_all_attrs_hidden - end - end -end