diff --git a/.circleci/config.yml b/.circleci/config.yml
index 6fb11b32..8201b95d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -19,7 +19,7 @@ ruby-docker-template: &ruby-docker-template
         gem install jruby-openssl;  # required by bundler, no effect on Ruby MRI
       fi
   - run: ruby -v
-  - run: gem install bundler -v "~> 1.7"
+  - run: gem install bundler -v 1.17.3
   - run: bundle install
   - run: mkdir ./rspec
   - run: bundle exec rspec --format progress --format RspecJunitFormatter -o ./rspec/rspec.xml spec
@@ -32,28 +32,38 @@ jobs:
   test-2.2:
     <<: *ruby-docker-template
     docker:
-      - image: circleci/ruby:2.2.9-jessie
+      - image: circleci/ruby:2.2.10-jessie
+      - image: consul
       - image: redis
+      - image: amazon/dynamodb-local
   test-2.3:
     <<: *ruby-docker-template
     docker:
-      - image: circleci/ruby:2.3.6-jessie
+      - image: circleci/ruby:2.3.7-jessie
+      - image: consul
       - image: redis
+      - image: amazon/dynamodb-local
   test-2.4:
     <<: *ruby-docker-template
     docker:
-      - image: circleci/ruby:2.4.4-stretch
+      - image: circleci/ruby:2.4.5-stretch
+      - image: consul
       - image: redis
+      - image: amazon/dynamodb-local
   test-2.5:
     <<: *ruby-docker-template
     docker:
-      - image: circleci/ruby:2.5.1-stretch
+      - image: circleci/ruby:2.5.3-stretch
+      - image: consul
       - image: redis
+      - image: amazon/dynamodb-local
   test-jruby-9.2:
     <<: *ruby-docker-template
     docker:
       - image: circleci/jruby:9-jdk
+      - image: consul
       - image: redis
+      - image: amazon/dynamodb-local
 
 # The following very slow job uses an Ubuntu container to run the Ruby versions that
 # CircleCI doesn't provide Docker images for.
@@ -63,8 +73,11 @@ jobs:
     environment:
       - RUBIES: "jruby-9.1.17.0"
     steps:
+      - run: sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
      - run: sudo apt-get -q update
      - run: sudo apt-get -qy install redis-server
+      - run: sudo apt-cache policy docker-ce
+      - run: sudo apt-get -qy install docker-ce
      - checkout
      - run:
          name: install all Ruby versions
@@ -82,10 +95,24 @@ jobs:
            fi
            # bundler 2.0 may be preinstalled, we need to remove it if so
            yes | gem uninstall bundler --version '>=2.0' || true;
-            gem install bundler -v "~> 1.7";
+            gem install bundler -v 1.17.3;
            bundle install;
            mv Gemfile.lock "Gemfile.lock.$i"
          done
+      - run:
+          name: start DynamoDB
+          command: docker run -p 8000:8000 amazon/dynamodb-local
+          background: true
+      - run:
+          name: download Consul
+          command: wget https://releases.hashicorp.com/consul/0.8.0/consul_0.8.0_linux_amd64.zip
+      - run:
+          name: extract Consul
+          command: unzip consul_0.8.0_linux_amd64.zip
+      - run:
+          name: start Consul
+          command: ./consul agent -dev
+          background: true
      - run:
          name: run tests for all versions
          shell: /bin/bash -leo pipefail
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 96147068..c6b8dd20 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,15 +2,3 @@ Contributing to LaunchDarkly SDK for Ruby
 =========================================
 
 We encourage pull-requests and other contributions from the community. We've also published an [SDK contributor's guide](http://docs.launchdarkly.com/docs/sdk-contributors-guide) that provides a detailed explanation of how our SDKs work.
-
-Dependencies
-------------
-[ld-em-eventsource](https://github.com/launchdarkly/em-eventsource)
-
-
-Style
------
-
-Our pull requests have [Hound CI](https://houndci.com/) set up to do style checking.
-We also run [Rubocop](https://github.com/bbatsov/rubocop).
-
diff --git a/Gemfile.lock b/Gemfile.lock
index 68212c17..2e96a86a 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -5,20 +5,33 @@ PATH
       concurrent-ruby (~> 1.0)
       faraday (>= 0.9, < 2)
       faraday-http-cache (>= 1.3.0, < 3)
-      http_tools (~> 0.4.5)
       json (>= 1.8, < 3)
+      ld-eventsource (~> 1.0)
       net-http-persistent (>= 2.9, < 4.0)
       semantic (~> 1.6)
-      socketry (~> 0.5.1)
 
 GEM
   remote: https://rubygems.org/
   specs:
+    aws-eventstream (1.0.1)
+    aws-partitions (1.128.0)
+    aws-sdk-core (3.44.2)
+      aws-eventstream (~> 1.0)
+      aws-partitions (~> 1.0)
+      aws-sigv4 (~> 1.0)
+      jmespath (~> 1.0)
+    aws-sdk-dynamodb (1.19.0)
+      aws-sdk-core (~> 3, >= 3.39.0)
+      aws-sigv4 (~> 1.0)
+    aws-sigv4 (1.0.3)
     codeclimate-test-reporter (0.6.0)
       simplecov (>= 0.7.1, < 1.0.0)
     concurrent-ruby (1.1.4)
     connection_pool (2.2.1)
     diff-lcs (1.3)
+    diplomat (2.0.2)
+      faraday (~> 0.9)
+      json
     docile (1.1.5)
     faraday (0.15.4)
       multipart-post (>= 1.2, < 3)
@@ -27,10 +40,14 @@ GEM
     ffi (1.9.25)
     ffi (1.9.25-java)
     hitimes (1.3.0)
-    hitimes (1.3.0-java)
     http_tools (0.4.5)
+    jmespath (1.4.0)
     json (1.8.6)
     json (1.8.6-java)
+    ld-eventsource (1.0.0)
+      concurrent-ruby (~> 1.0)
+      http_tools (~> 0.4.5)
+      socketry (~> 0.5.1)
     listen (3.1.5)
       rb-fsevent (~> 0.9, >= 0.9.4)
       rb-inotify (~> 0.9, >= 0.9.7)
@@ -74,9 +91,11 @@ PLATFORMS
   ruby
 
 DEPENDENCIES
+  aws-sdk-dynamodb (~> 1.18)
   bundler (~> 1.7)
   codeclimate-test-reporter (~> 0)
   connection_pool (>= 2.1.2)
+  diplomat (>= 2.0.2)
   ldclient-rb!
   listen (~> 3.0)
   rake (~> 10.0)
diff --git a/README.md b/README.md
index ead2bb6b..f4dc72b7 100644
--- a/README.md
+++ b/README.md
@@ -15,37 +15,37 @@ This version of the LaunchDarkly SDK has a minimum Ruby version of 2.2.6, or 9.1
 Quick setup
 -----------
 
-0. Install the Ruby SDK with `gem`
+1. Install the Ruby SDK with `gem`
 
-```shell
+   ```shell
 gem install ldclient-rb
 ```
 
-1. Require the LaunchDarkly client:
+2. Require the LaunchDarkly client:
 
-```ruby
+   ```ruby
 require 'ldclient-rb'
 ```
 
-2. Create a new LDClient with your SDK key:
+3. Create a new LDClient with your SDK key:
 
-```ruby
+   ```ruby
 client = LaunchDarkly::LDClient.new("your_sdk_key")
 ```
 
 ### Ruby on Rails
 
-0. Add `gem 'ldclient-rb'` to your Gemfile and `bundle install`
+1. Add `gem 'ldclient-rb'` to your Gemfile and `bundle install`
 
-1. Initialize the launchdarkly client in `config/initializers/launchdarkly.rb`:
+2. Initialize the launchdarkly client in `config/initializers/launchdarkly.rb`:
 
-```ruby
+   ```ruby
 Rails.configuration.ld_client = LaunchDarkly::LDClient.new("your_sdk_key")
 ```
 
-2. You may want to include a function in your ApplicationController
+3. You may want to include a function in your ApplicationController
 
-```ruby
+   ```ruby
 def launchdarkly_settings
   if current_user.present?
     {
@@ -72,31 +72,44 @@ Rails.configuration.ld_client = LaunchDarkly::LDClient.new("your_sdk_key")
   end
 end
 ```
 
-3. In your controllers, access the client using
+4. In your controllers, access the client using
 
-```ruby
+   ```ruby
 Rails.application.config.ld_client.variation('your.flag.key', launchdarkly_settings, false)
 ```
 
 Note that this gem will automatically switch to using the Rails logger it is detected.
 
+Your first feature flag
+-----------------------
+
+1. Create a new feature flag on your [dashboard](https://app.launchdarkly.com).
+2. In your application code, use the feature's key to check whether the flag is on for each user:
+
+```ruby
+if client.variation("your.flag.key", {key: "user@test.com"}, false)
+  # application code to show the feature
+else
+  # the code to run if the feature is off
+end
+```
+
 HTTPS proxy
-------------
-The Ruby SDK uses Faraday and Socketry to handle its network traffic. Both of these provide built-in support for the use of an HTTPS proxy. If the HTTPS_PROXY environment variable is present then the SDK will proxy all network requests through the URL provided.
+-----------
+
+The Ruby SDK uses Faraday and Socketry to handle its network traffic. Both of these provide built-in support for the use of an HTTPS proxy. If the HTTPS_PROXY environment variable is present then the SDK will proxy all network requests through the URL provided. (HTTP_PROXY is not used because all LaunchDarkly services require HTTPS.)
 
 How to set the HTTPS_PROXY environment variable on Mac/Linux systems:
 ```
 export HTTPS_PROXY=https://web-proxy.domain.com:8080
 ```
-
 How to set the HTTPS_PROXY environment variable on Windows systems:
 ```
 set HTTPS_PROXY=https://web-proxy.domain.com:8080
 ```
-
 If your proxy requires authentication then you can prefix the URN with your login information:
 ```
 export HTTPS_PROXY=http://user:pass@web-proxy.domain.com:8080
@@ -106,29 +119,22 @@ or
 ```
 set HTTPS_PROXY=http://user:pass@web-proxy.domain.com:8080
 ```
 
+Database integrations
+---------------------
-
-Your first feature flag
------------------------
-
-1. Create a new feature flag on your [dashboard](https://app.launchdarkly.com)
-2. In your application code, use the feature's key to check whether the flag is on for each user:
-
-```ruby
-if client.variation("your.flag.key", {key: "user@test.com"}, false)
-  # application code to show the feature
-else
-  # the code to run if the feature is off
-end
-```
+Feature flag data can be kept in a persistent store using Redis, DynamoDB, or Consul. These adapters are implemented in the `LaunchDarkly::Integrations::Redis`, `LaunchDarkly::Integrations::DynamoDB`, and `LaunchDarkly::Integrations::Consul` modules; to use them, call the `new_feature_store` method in the module, and put the returned object in the `feature_store` property of your client configuration. See the [API documentation](https://www.rubydoc.info/gems/ldclient-rb/LaunchDarkly/Integrations) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store) for more information.
 
 Using flag data from a file
 ---------------------------
-For testing purposes, the SDK can be made to read feature flag state from a file or files instead of connecting to LaunchDarkly. See [`file_data_source.rb`](https://github.com/launchdarkly/ruby-client/blob/master/lib/ldclient-rb/file_data_source.rb) for more details.
+
+For testing purposes, the SDK can be made to read feature flag state from a file or files instead of connecting to LaunchDarkly. See `LaunchDarkly::FileDataSource` or the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/reading-flags-from-a-file) for more details.
 
 Learn more
 -----------
-Check out our [documentation](http://docs.launchdarkly.com) for in-depth instructions on configuring and using LaunchDarkly. You can also head straight to the [complete reference guide for this SDK](http://docs.launchdarkly.com/docs/ruby-sdk-reference).
+Check out our [documentation](http://docs.launchdarkly.com) for in-depth instructions on configuring and using LaunchDarkly. You can also head straight to the [reference guide for this SDK](http://docs.launchdarkly.com/docs/ruby-sdk-reference).
+
+Generated API documentation is on [RubyDoc.info](https://www.rubydoc.info/gems/ldclient-rb).
 
 Testing
 -------
@@ -138,10 +144,10 @@ We run integration tests for all our SDKs using a centralized test harness. This
 
 Contributing
 ------------
-See [Contributing](https://github.com/launchdarkly/ruby-client/blob/master/CONTRIBUTING.md)
+See [Contributing](https://github.com/launchdarkly/ruby-client/blob/master/CONTRIBUTING.md).
 
 About LaunchDarkly
-------------
+------------------
 
 * LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. With LaunchDarkly, you can:
     * Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases.
@@ -153,9 +159,9 @@ About LaunchDarkly
     * [JavaScript](http://docs.launchdarkly.com/docs/js-sdk-reference "LaunchDarkly JavaScript SDK")
     * [PHP](http://docs.launchdarkly.com/docs/php-sdk-reference "LaunchDarkly PHP SDK")
     * [Python](http://docs.launchdarkly.com/docs/python-sdk-reference "LaunchDarkly Python SDK")
-    * [Python Twisted](http://docs.launchdarkly.com/docs/python-twisted-sdk-reference "LaunchDarkly Python Twisted SDK")
     * [Go](http://docs.launchdarkly.com/docs/go-sdk-reference "LaunchDarkly Go SDK")
     * [Node.JS](http://docs.launchdarkly.com/docs/node-sdk-reference "LaunchDarkly Node SDK")
+    * [Electron](http://docs.launchdarkly.com/docs/electron-sdk-reference "LaunchDarkly Electron SDK")
     * [.NET](http://docs.launchdarkly.com/docs/dotnet-sdk-reference "LaunchDarkly .Net SDK")
     * [Ruby](http://docs.launchdarkly.com/docs/ruby-sdk-reference "LaunchDarkly Ruby SDK")
     * [iOS](http://docs.launchdarkly.com/docs/ios-sdk-reference "LaunchDarkly iOS SDK")
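Editor's note on the new "Database integrations" README section above: it names the three store factories but gives no snippet. A minimal sketch of the pattern it describes, shown with the Redis module (the `redis_url` and `prefix` option names are taken from the store implementation later in this diff; the values are placeholders):

```ruby
require "ldclient-rb"

# Build a persistent feature store; DynamoDB and Consul follow the same pattern.
store = LaunchDarkly::Integrations::Redis.new_feature_store(
  redis_url: "redis://localhost:6379/0",  # option read by the Redis store core
  prefix: "launchdarkly"                  # key prefix, also read by the store core
)

# Put the returned object in the feature_store property of the client
# configuration, as the README text describes.
config = LaunchDarkly::Config.new(feature_store: store)
client = LaunchDarkly::LDClient.new("your_sdk_key", config)
```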
diff --git a/ldclient-rb.gemspec b/ldclient-rb.gemspec
index 4e96b6b4..a7b64dc7 100644
--- a/ldclient-rb.gemspec
+++ b/ldclient-rb.gemspec
@@ -13,7 +13,7 @@ Gem::Specification.new do |spec|
   spec.summary = "LaunchDarkly SDK for Ruby"
   spec.description = "Official LaunchDarkly SDK for Ruby"
   spec.homepage = "https://github.com/launchdarkly/ruby-client"
-  spec.license = "Apache 2.0"
+  spec.license = "Apache-2.0"
 
   spec.files = `git ls-files -z`.split("\x0")
   spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
@@ -21,9 +21,11 @@ Gem::Specification.new do |spec|
   spec.require_paths = ["lib"]
   spec.extensions = 'ext/mkrf_conf.rb'
 
+  spec.add_development_dependency "aws-sdk-dynamodb", "~> 1.18"
   spec.add_development_dependency "bundler", "~> 1.7"
   spec.add_development_dependency "rspec", "~> 3.2"
   spec.add_development_dependency "codeclimate-test-reporter", "~> 0"
+  spec.add_development_dependency "diplomat", ">= 2.0.2"
   spec.add_development_dependency "redis", "~> 3.3.5"
   spec.add_development_dependency "connection_pool", ">= 2.1.2"
   spec.add_development_dependency "rake", "~> 10.0"
@@ -37,6 +39,5 @@ Gem::Specification.new do |spec|
   spec.add_runtime_dependency "semantic", "~> 1.6"
   spec.add_runtime_dependency "net-http-persistent", [">= 2.9", "< 4.0"]
   spec.add_runtime_dependency "concurrent-ruby", "~> 1.0"
-  spec.add_runtime_dependency "http_tools", '~> 0.4.5'
-  spec.add_runtime_dependency "socketry", "~> 0.5.1"
+  spec.add_runtime_dependency "ld-eventsource", '~> 1.0'
 end
diff --git a/lib/ldclient-rb.rb b/lib/ldclient-rb.rb
index d3ee6ffc..e5477ecb 100644
--- a/lib/ldclient-rb.rb
+++ b/lib/ldclient-rb.rb
@@ -1,4 +1,12 @@
+
+#
+# Namespace for the LaunchDarkly Ruby SDK.
+#
+module LaunchDarkly
+end
+
 require "ldclient-rb/version"
+require "ldclient-rb/interfaces"
 require "ldclient-rb/util"
 require "ldclient-rb/evaluation"
 require "ldclient-rb/flags_state"
@@ -16,6 +24,6 @@ require "ldclient-rb/non_blocking_thread_pool"
 require "ldclient-rb/event_summarizer"
 require "ldclient-rb/events"
-require "ldclient-rb/redis_store"
 require "ldclient-rb/requestor"
 require "ldclient-rb/file_data_source"
+require "ldclient-rb/integrations"
diff --git a/lib/ldclient-rb/cache_store.rb b/lib/ldclient-rb/cache_store.rb
index 164534fb..8451bb5f 100644
--- a/lib/ldclient-rb/cache_store.rb
+++ b/lib/ldclient-rb/cache_store.rb
@@ -7,6 +7,7 @@ module LaunchDarkly
   #
   # @see https://github.com/plataformatec/faraday-http-cache
   # @see https://github.com/ruby-concurrency
+  # @private
   #
   class ThreadSafeMemoryStore
     #
diff --git a/lib/ldclient-rb/config.rb b/lib/ldclient-rb/config.rb
index dc89d30a..34f4f67b 100644
--- a/lib/ldclient-rb/config.rb
+++ b/lib/ldclient-rb/config.rb
@@ -8,66 +8,36 @@ module LaunchDarkly
   #
   #
   class Config
+    # rubocop:disable Metrics/AbcSize, Metrics/PerceivedComplexity
+
     #
     # Constructor for creating custom LaunchDarkly configurations.
     #
     # @param opts [Hash] the configuration options
-    # @option opts [Logger] :logger A logger to use for messages from the
-    #   LaunchDarkly client. Defaults to the Rails logger in a Rails
-    #   environment, or stdout otherwise.
-    # @option opts [String] :base_uri ("https://app.launchdarkly.com") The base
-    #   URL for the LaunchDarkly server. Most users should use the default value.
-    # @option opts [String] :stream_uri ("https://stream.launchdarkly.com") The
-    #   URL for the LaunchDarkly streaming events server. Most users should use the default value.
-    # @option opts [String] :events_uri ("https://events.launchdarkly.com") The
-    #   URL for the LaunchDarkly events server. Most users should use the default value.
-    # @option opts [Integer] :capacity (10000) The capacity of the events
-    #   buffer. The client buffers up to this many events in memory before
-    #   flushing. If the capacity is exceeded before the buffer is flushed,
-    #   events will be discarded.
-    # @option opts [Float] :flush_interval (30) The number of seconds between
-    #   flushes of the event buffer.
-    # @option opts [Float] :read_timeout (10) The read timeout for network
-    #   connections in seconds.
-    # @option opts [Float] :connect_timeout (2) The connect timeout for network
-    #   connections in seconds.
-    # @option opts [Object] :cache_store A cache store for the Faraday HTTP caching
-    #   library. Defaults to the Rails cache in a Rails environment, or a
-    #   thread-safe in-memory store otherwise.
-    # @option opts [Object] :feature_store A store for feature flags and related data. Defaults to an in-memory
-    #   cache, or you can use RedisFeatureStore.
-    # @option opts [Boolean] :use_ldd (false) Whether you are using the LaunchDarkly relay proxy in
-    #   daemon mode. In this configuration, the client will not use a streaming connection to listen
-    #   for updates, but instead will get feature state from a Redis instance. The `stream` and
-    #   `poll_interval` options will be ignored if this option is set to true.
-    # @option opts [Boolean] :offline (false) Whether the client should be initialized in
-    #   offline mode. In offline mode, default values are returned for all flags and no
-    #   remote network requests are made.
-    # @option opts [Float] :poll_interval (30) The number of seconds between polls for flag updates
-    #   if streaming is off.
-    # @option opts [Boolean] :stream (true) Whether or not the streaming API should be used to receive flag updates.
-    #   Streaming should only be disabled on the advice of LaunchDarkly support.
-    # @option opts [Boolean] all_attributes_private (false) If true, all user attributes (other than the key)
-    #   will be private, not just the attributes specified in `private_attribute_names`.
-    # @option opts [Array] :private_attribute_names Marks a set of attribute names private. Any users sent to
-    #   LaunchDarkly with this configuration active will have attributes with these names removed.
-    # @option opts [Boolean] :send_events (true) Whether or not to send events back to LaunchDarkly.
-    #   This differs from `offline` in that it affects only the sending of client-side events, not
-    #   streaming or polling for events from the server.
-    # @option opts [Integer] :user_keys_capacity (1000) The number of user keys that the event processor
-    #   can remember at any one time, so that duplicate user details will not be sent in analytics events.
-    # @option opts [Float] :user_keys_flush_interval (300) The interval in seconds at which the event
-    #   processor will reset its set of known user keys.
-    # @option opts [Boolean] :inline_users_in_events (false) Whether to include full user details in every
-    #   analytics event. By default, events will only include the user key, except for one "index" event
-    #   that provides the full details for the user.
-    # @option opts [Object] :update_processor (DEPRECATED) An object that will receive feature flag data from
-    #   LaunchDarkly. Defaults to either the streaming or the polling processor, can be customized for tests.
-    # @option opts [Object] :update_processor_factory A function that takes the SDK and configuration object
-    #   as parameters, and returns an object that can obtain feature flag data and put it into the feature
-    #   store. Defaults to creating either the streaming or the polling processor, can be customized for tests.
-    # @return [type] [description]
-    # rubocop:disable Metrics/AbcSize, Metrics/PerceivedComplexity
+    # @option opts [Logger] :logger See {#logger}.
+    # @option opts [String] :base_uri ("https://app.launchdarkly.com") See {#base_uri}.
+    # @option opts [String] :stream_uri ("https://stream.launchdarkly.com") See {#stream_uri}.
+    # @option opts [String] :events_uri ("https://events.launchdarkly.com") See {#events_uri}.
+    # @option opts [Integer] :capacity (10000) See {#capacity}.
+    # @option opts [Float] :flush_interval (30) See {#flush_interval}.
+    # @option opts [Float] :read_timeout (10) See {#read_timeout}.
+    # @option opts [Float] :connect_timeout (2) See {#connect_timeout}.
+    # @option opts [Object] :cache_store See {#cache_store}.
+    # @option opts [Object] :feature_store See {#feature_store}.
+    # @option opts [Boolean] :use_ldd (false) See {#use_ldd?}.
+    # @option opts [Boolean] :offline (false) See {#offline?}.
+    # @option opts [Float] :poll_interval (30) See {#poll_interval}.
+    # @option opts [Boolean] :stream (true) See {#stream?}.
+    # @option opts [Boolean] :all_attributes_private (false) See {#all_attributes_private}.
+    # @option opts [Array] :private_attribute_names See {#private_attribute_names}.
+    # @option opts [Boolean] :send_events (true) See {#send_events}.
+    # @option opts [Integer] :user_keys_capacity (1000) See {#user_keys_capacity}.
+    # @option opts [Float] :user_keys_flush_interval (300) See {#user_keys_flush_interval}.
+    # @option opts [Boolean] :inline_users_in_events (false) See {#inline_users_in_events}.
+    # @option opts [Object] :data_source See {#data_source}.
+    # @option opts [Object] :update_processor Obsolete synonym for `data_source`.
+    # @option opts [Object] :update_processor_factory Obsolete synonym for `data_source`.
+    #
     def initialize(opts = {})
       @base_uri = (opts[:base_uri] || Config.default_base_uri).chomp("/")
       @stream_uri = (opts[:stream_uri] || Config.default_stream_uri).chomp("/")
@@ -90,48 +60,62 @@ def initialize(opts = {})
       @user_keys_capacity = opts[:user_keys_capacity] || Config.default_user_keys_capacity
       @user_keys_flush_interval = opts[:user_keys_flush_interval] || Config.default_user_keys_flush_interval
       @inline_users_in_events = opts[:inline_users_in_events] || false
+      @data_source = opts[:data_source] || opts[:update_processor] || opts[:update_processor_factory]
       @update_processor = opts[:update_processor]
       @update_processor_factory = opts[:update_processor_factory]
     end
 
     #
-    # The base URL for the LaunchDarkly server.
+    # The base URL for the LaunchDarkly server. This is configurable mainly for testing
+    # purposes; most users should use the default value.
+    # @return [String]
     #
-    # @return [String] The configured base URL for the LaunchDarkly server.
     attr_reader :base_uri
 
     #
-    # The base URL for the LaunchDarkly streaming server.
+    # The base URL for the LaunchDarkly streaming server. This is configurable mainly for testing
+    # purposes; most users should use the default value.
+    # @return [String]
     #
-    # @return [String] The configured base URL for the LaunchDarkly streaming server.
     attr_reader :stream_uri
 
     #
-    # The base URL for the LaunchDarkly events server.
+    # The base URL for the LaunchDarkly events server. This is configurable mainly for testing
+    # purposes; most users should use the default value.
+    # @return [String]
     #
-    # @return [String] The configured base URL for the LaunchDarkly events server.
     attr_reader :events_uri
 
     #
     # Whether streaming mode should be enabled. Streaming mode asynchronously updates
-    # feature flags in real-time using server-sent events.
+    # feature flags in real-time using server-sent events. Streaming is enabled by default, and
+    # should only be disabled on the advice of LaunchDarkly support.
+    # @return [Boolean]
     #
-    # @return [Boolean] True if streaming mode should be enabled
     def stream?
       @stream
     end
 
     #
-    # Whether to use the LaunchDarkly relay proxy in daemon mode. In this mode, we do
-    # not use polling or streaming to get feature flag updates from the server, but instead
-    # read them from a Redis instance that is updated by the proxy.
+    # Whether to use the LaunchDarkly relay proxy in daemon mode. In this mode, the client does not
+    # use polling or streaming to get feature flag updates from the server, but instead reads them
+    # from the {#feature_store feature store}, which is assumed to be a database that is populated by
+    # a LaunchDarkly relay proxy. For more information, see ["The relay proxy"](https://docs.launchdarkly.com/v2.0/docs/the-relay-proxy)
+    # and ["Using a persistent feature store"](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store).
+    #
+    # All other properties related to streaming or polling are ignored if this option is set to true.
+    #
+    # @return [Boolean]
     #
-    # @return [Boolean] True if using the LaunchDarkly relay proxy in daemon mode
     def use_ldd?
       @use_ldd
     end
 
-    # TODO docs
+    #
+    # Whether the client should be initialized in offline mode. In offline mode, default values are
+    # returned for all flags and no remote network requests are made.
+    # @return [Boolean]
+    #
     def offline?
       @offline
     end
@@ -139,20 +123,23 @@ def offline?
     #
     # The number of seconds between flushes of the event buffer. Decreasing the flush interval means
     # that the event buffer is less likely to reach capacity.
+    # @return [Float]
     #
-    # @return [Float] The configured number of seconds between flushes of the event buffer.
     attr_reader :flush_interval
 
     #
     # The number of seconds to wait before polling for feature flag updates. This option has no
-    # effect unless streaming is disabled
+    # effect unless streaming is disabled.
+    # @return [Float]
+    #
     attr_reader :poll_interval
 
     #
     # The configured logger for the LaunchDarkly client. The client library uses the log to
-    # print warning and error messages.
+    # print warning and error messages. If not specified, this defaults to the Rails logger
+    # in a Rails environment, or stdout otherwise.
+    # @return [Logger]
     #
-    # @return [Logger] The configured logger
     attr_reader :logger
 
     #
@@ -161,114 +148,206 @@ def offline?
     # the buffer is flushed, events will be discarded.
     # Increasing the capacity means that events are less likely to be discarded,
     # at the cost of consuming more memory.
+    # @return [Integer]
     #
-    # @return [Integer] The configured capacity of the event buffer
     attr_reader :capacity
 
     #
-    # The store for the Faraday HTTP caching library. Stores should respond to
-    # 'read' and 'write' requests.
+    # A store for HTTP caching. This must support the semantics used by the
+    # [`faraday-http-cache`](https://github.com/plataformatec/faraday-http-cache) gem. Defaults
+    # to the Rails cache in a Rails environment, or a thread-safe in-memory store otherwise.
+    # @return [Object]
     #
-    # @return [Object] The configured store for the Faraday HTTP caching library.
     attr_reader :cache_store
 
     #
-    # The read timeout for network connections in seconds.
+    # The read timeout for network connections in seconds. This does not apply to the streaming
+    # connection, which uses a longer timeout since the server does not send data constantly.
+    # @return [Float]
     #
-    # @return [Float] The read timeout in seconds.
     attr_reader :read_timeout
 
     #
     # The connect timeout for network connections in seconds.
+    # @return [Float]
     #
-    # @return [Float] The connect timeout in seconds.
     attr_reader :connect_timeout
 
     #
-    # A store for feature flag configuration rules.
+    # A store for feature flags and related data. The client uses it to store all data received
+    # from LaunchDarkly, and uses the last stored data when evaluating flags. Defaults to
+    # {InMemoryFeatureStore}; for other implementations, see {LaunchDarkly::Integrations}.
+    #
+    # For more information, see ["Using a persistent feature store"](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store).
+    #
+    # @return [LaunchDarkly::Interfaces::FeatureStore]
     #
     attr_reader :feature_store
 
-    # The proxy configuration string
+    #
+    # The proxy configuration string.
+    # @return [String]
     #
     attr_reader :proxy
 
+    #
+    # True if all user attributes (other than the key) should be considered private. This means
+    # that the attribute values will not be sent to LaunchDarkly in analytics events and will not
+    # appear on the LaunchDarkly dashboard.
+    # @return [Boolean]
+    # @see #private_attribute_names
+    #
     attr_reader :all_attributes_private
 
+    #
+    # A list of user attribute names that should always be considered private. This means that the
+    # attribute values will not be sent to LaunchDarkly in analytics events and will not appear on
+    # the LaunchDarkly dashboard.
+    #
+    # You can also specify the same behavior for an individual flag evaluation by storing an array
+    # of attribute names in the `:privateAttributeNames` property (note camelcase name) of the
+    # user object.
+    #
+    # @return [Array]
+    # @see #all_attributes_private
+    #
     attr_reader :private_attribute_names
 
     #
-    # Whether to send events back to LaunchDarkly.
+    # Whether to send events back to LaunchDarkly. This differs from {#offline?} in that it affects
+    # only the sending of client-side events, not streaming or polling for events from the server.
+    # @return [Boolean]
     #
     attr_reader :send_events
 
     #
-    # The number of user keys that the event processor can remember at any one time, so that
-    # duplicate user details will not be sent in analytics events.
+    # The number of user keys that the event processor can remember at any one time. This reduces the
+    # amount of duplicate user details sent in analytics events.
+    # @return [Integer]
+    # @see #user_keys_flush_interval
     #
     attr_reader :user_keys_capacity
 
     #
     # The interval in seconds at which the event processor will reset its set of known user keys.
+    # @return [Float]
+    # @see #user_keys_capacity
     #
     attr_reader :user_keys_flush_interval
 
     #
-    # Whether to include full user details in every
-    # analytics event. By default, events will only include the user key, except for one "index" event
-    # that provides the full details for the user.
+    # Whether to include full user details in every analytics event. By default, events will only
+    # include the user key, except for one "index" event that provides the full details for the user.
+    # The only reason to change this is if you are using the Analytics Data Stream.
+    # @return [Boolean]
    #
     attr_reader :inline_users_in_events
 
+    #
+    # An object that is responsible for receiving feature flag data from LaunchDarkly. By default,
+    # the client uses its standard polling or streaming implementation; this is customizable for
+    # testing purposes.
+    #
+    # This may be set to either an object that conforms to {LaunchDarkly::Interfaces::DataSource},
+    # or a lambda (or Proc) that takes two parameters-- SDK key and {Config}-- and returns such an
+    # object.
+    #
+    # @return [LaunchDarkly::Interfaces::DataSource|lambda]
+    # @see FileDataSource
+    #
+    attr_reader :data_source
+
+    # @deprecated This is replaced by {#data_source}.
     attr_reader :update_processor
 
+    # @deprecated This is replaced by {#data_source}.
     attr_reader :update_processor_factory
-
+
     #
     # The default LaunchDarkly client configuration. This configuration sets
     # reasonable defaults for most users.
-    #
     # @return [Config] The default LaunchDarkly configuration.
+    #
     def self.default
       Config.new
     end
 
+    #
+    # The default value for {#capacity}.
+    # @return [Integer] 10000
+    #
     def self.default_capacity
       10000
     end
 
+    #
+    # The default value for {#base_uri}.
+    # @return [String] "https://app.launchdarkly.com"
+    #
     def self.default_base_uri
       "https://app.launchdarkly.com"
     end
 
+    #
+    # The default value for {#stream_uri}.
+    # @return [String] "https://stream.launchdarkly.com"
+    #
     def self.default_stream_uri
       "https://stream.launchdarkly.com"
     end
 
+    #
+    # The default value for {#events_uri}.
+    # @return [String] "https://events.launchdarkly.com"
+    #
     def self.default_events_uri
       "https://events.launchdarkly.com"
     end
 
+    #
+    # The default value for {#cache_store}.
+    # @return [Object] the Rails cache if in Rails, or a simple in-memory implementation otherwise
+    #
     def self.default_cache_store
       defined?(Rails) && Rails.respond_to?(:cache) ? Rails.cache : ThreadSafeMemoryStore.new
     end
 
+    #
+    # The default value for {#flush_interval}.
+    # @return [Float] 10
+    #
     def self.default_flush_interval
       10
     end
 
+    #
+    # The default value for {#read_timeout}.
+    # @return [Float] 10
+    #
     def self.default_read_timeout
       10
     end
 
+    #
+    # The default value for {#connect_timeout}.
+    # @return [Float] 2
+    #
     def self.default_connect_timeout
       2
     end
 
+    #
+    # The default value for {#proxy}.
+    # @return [String] nil
+    #
     def self.default_proxy
       nil
     end
 
+    #
+    # The default value for {#logger}.
+    # @return [Logger] the Rails logger if in Rails, or a default Logger at WARN level otherwise
+    #
     def self.default_logger
       if defined?(Rails) && Rails.respond_to?(:logger)
         Rails.logger
@@ -279,34 +358,66 @@ def self.default_logger
       end
     end
 
+    #
+    # The default value for {#stream?}.
+    # @return [Boolean] true
+    #
     def self.default_stream
       true
     end
 
+    #
+    # The default value for {#use_ldd?}.
+    # @return [Boolean] false
+    #
     def self.default_use_ldd
       false
     end
 
+    #
+    # The default value for {#feature_store}.
+    # @return [LaunchDarkly::Interfaces::FeatureStore] an {InMemoryFeatureStore}
+    #
     def self.default_feature_store
       InMemoryFeatureStore.new
     end
 
+    #
+    # The default value for {#offline?}.
+    # @return [Boolean] false
+    #
     def self.default_offline
       false
     end
 
+    #
+    # The default value for {#poll_interval}.
+    # @return [Float] 30
+    #
     def self.default_poll_interval
       30
     end
 
+    #
+    # The default value for {#send_events}.
+    # @return [Boolean] true
+    #
     def self.default_send_events
       true
     end
 
+    #
+    # The default value for {#user_keys_capacity}.
+    # @return [Integer] 1000
+    #
     def self.default_user_keys_capacity
       1000
     end
 
+    #
+    # The default value for {#user_keys_flush_interval}.
+    # @return [Float] 300
+    #
     def self.default_user_keys_flush_interval
       300
     end
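Editor's note: the rewritten option docs above now defer to the attribute readers. For reference, a minimal sketch of a custom configuration using several of the documented options (all values are illustrative only):

```ruby
require "logger"
require "ldclient-rb"

config = LaunchDarkly::Config.new(
  logger: Logger.new($stdout),        # see #logger
  capacity: 20000,                    # larger event buffer; see #capacity
  flush_interval: 10,                 # flush events more often; see #flush_interval
  private_attribute_names: ["email"]  # never send these attributes; see #private_attribute_names
)
client = LaunchDarkly::LDClient.new("your_sdk_key", config)
```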
diff --git a/lib/ldclient-rb/evaluation.rb b/lib/ldclient-rb/evaluation.rb
index f73eb1ed..f873a6e3 100644
--- a/lib/ldclient-rb/evaluation.rb
+++ b/lib/ldclient-rb/evaluation.rb
@@ -2,7 +2,7 @@ require "semantic"
 
 module LaunchDarkly
-  # An object returned by `LDClient.variation_detail`, combining the result of a flag evaluation with
+  # An object returned by {LDClient#variation_detail}, combining the result of a flag evaluation with
   # an explanation of how it was calculated.
   class EvaluationDetail
     def initialize(value, variation_index, reason)
@@ -11,19 +11,66 @@ def initialize(value, variation_index, reason)
       @reason = reason
     end
 
-    # @return [Object] The result of the flag evaluation. This will be either one of the flag's
-    #   variations or the default value that was passed to the `variation` method.
+    #
+    # The result of the flag evaluation. This will be either one of the flag's variations, or the
+    # default value that was passed to {LDClient#variation_detail}. It is the same as the return
+    # value of {LDClient#variation}.
+    #
+    # @return [Object]
+    #
     attr_reader :value
 
-    # @return [int|nil] The index of the returned value within the flag's list of variations, e.g.
-    #   0 for the first variation - or `nil` if the default value was returned.
+    #
+    # The index of the returned value within the flag's list of variations. The first variation is
+    # 0, the second is 1, etc. This is `nil` if the default value was returned.
+    #
+    # @return [int|nil]
+    #
     attr_reader :variation_index
 
-    # @return [Hash] An object describing the main factor that influenced the flag evaluation value.
+    #
+    # An object describing the main factor that influenced the flag evaluation value.
+    #
+    # This object is currently represented as a Hash, which may have the following keys:
+    #
+    # `:kind`: The general category of reason. Possible values:
+    #
+    # * `'OFF'`: the flag was off and therefore returned its configured off value
+    # * `'FALLTHROUGH'`: the flag was on but the user did not match any targets or rules
+    # * `'TARGET_MATCH'`: the user key was specifically targeted for this flag
+    # * `'RULE_MATCH'`: the user matched one of the flag's rules
+    # * `'PREREQUISITE_FAILED'`: the flag was considered off because it had at least one
+    #   prerequisite flag that either was off or did not return the desired variation
+    # * `'ERROR'`: the flag could not be evaluated, so the default value was returned
+    #
+    # `:ruleIndex`: If the kind was `RULE_MATCH`, this is the positional index of the
+    #   matched rule (0 for the first rule).
+    #
+    # `:ruleId`: If the kind was `RULE_MATCH`, this is the rule's unique identifier.
+    #
+    # `:prerequisiteKey`: If the kind was `PREREQUISITE_FAILED`, this is the flag key of
+    #   the prerequisite flag that failed.
+    #
+    # `:errorKind`: If the kind was `ERROR`, this indicates the type of error:
+    #
+    # * `'CLIENT_NOT_READY'`: the caller tried to evaluate a flag before the client had
+    #   successfully initialized
+    # * `'FLAG_NOT_FOUND'`: the caller provided a flag key that did not match any known flag
+    # * `'MALFORMED_FLAG'`: there was an internal inconsistency in the flag data, e.g. a
+    #   rule specified a nonexistent variation
+    # * `'USER_NOT_SPECIFIED'`: the user object or user key was not provided
+    # * `'EXCEPTION'`: an unexpected exception stopped flag evaluation
+    #
+    # @return [Hash]
+    #
     attr_reader :reason
 
-    # @return [boolean] True if the flag evaluated to the default value rather than to one of its
-    #   variations.
+    #
+    # Tests whether the flag evaluation returned a default value. This is the same as checking
+    # whether {#variation_index} is nil.
+    #
+    # @return [Boolean]
+    #
     def default_value?
       variation_index.nil?
     end
@@ -33,6 +80,7 @@ def ==(other)
     end
   end
 
+  # @private
   module Evaluation
     BUILTINS = [:key, :ip, :country, :email, :firstName, :lastName, :avatar, :name, :anonymous]
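Editor's note: the expanded `reason` documentation above is easiest to read alongside a call site. A sketch of how the documented keys might be inspected (the flag key and user are placeholders):

```ruby
detail = client.variation_detail("your.flag.key", { key: "user@test.com" }, false)

case detail.reason[:kind]
when 'ERROR'
  # :errorKind narrows down what went wrong, as listed above
  puts "evaluation failed: #{detail.reason[:errorKind]}"
when 'RULE_MATCH'
  puts "matched rule #{detail.reason[:ruleIndex]} (#{detail.reason[:ruleId]})"
else
  puts "value #{detail.value.inspect} came from variation #{detail.variation_index}"
end
```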
diff --git a/lib/ldclient-rb/event_summarizer.rb b/lib/ldclient-rb/event_summarizer.rb
index 1c55b524..c48a400f 100644
--- a/lib/ldclient-rb/event_summarizer.rb
+++ b/lib/ldclient-rb/event_summarizer.rb
@@ -1,11 +1,14 @@
 module LaunchDarkly
+  # @private
   EventSummary = Struct.new(:start_date, :end_date, :counters)
 
   # Manages the state of summarizable information for the EventProcessor, including the
   # event counters and user deduplication. Note that the methods of this class are
   # deliberately not thread-safe; the EventProcessor is responsible for enforcing
   # synchronization across both the summarizer and the event queue.
+  #
+  # @private
   class EventSummarizer
     def initialize
       clear
diff --git a/lib/ldclient-rb/events.rb b/lib/ldclient-rb/events.rb
index e19d6b02..cbae5ac5 100644
--- a/lib/ldclient-rb/events.rb
+++ b/lib/ldclient-rb/events.rb
@@ -9,6 +9,10 @@ module LaunchDarkly
   MAX_FLUSH_WORKERS = 5
   CURRENT_SCHEMA_VERSION = 3
 
+  private_constant :MAX_FLUSH_WORKERS
+  private_constant :CURRENT_SCHEMA_VERSION
+
+  # @private
   class NullEventProcessor
     def add_event(event)
     end
@@ -20,6 +24,7 @@ def stop
     end
   end
 
+  # @private
   class EventMessage
     def initialize(event)
       @event = event
@@ -27,12 +32,15 @@ def initialize(event)
     attr_reader :event
   end
 
+  # @private
   class FlushMessage
   end
 
+  # @private
   class FlushUsersMessage
   end
 
+  # @private
   class SynchronousMessage
     def initialize
       @reply = Concurrent::Semaphore.new(0)
@@ -47,12 +55,15 @@ def wait_for_completion
     end
   end
 
+  # @private
   class TestSyncMessage < SynchronousMessage
   end
 
+  # @private
   class StopMessage < SynchronousMessage
   end
 
+  # @private
   class EventProcessor
     def initialize(sdk_key, config, client = nil)
       @queue = Queue.new
@@ -99,6 +110,7 @@ def wait_until_inactive
     end
   end
 
+  # @private
   class EventDispatcher
     def initialize(queue, sdk_key, config, client)
       @sdk_key = sdk_key
@@ -252,8 +264,10 @@ def handle_response(res)
     end
   end
 
+  # @private
   FlushPayload = Struct.new(:events, :summary)
 
+  # @private
   class EventBuffer
     def initialize(capacity, logger)
       @capacity = capacity
@@ -290,6 +304,7 @@ def clear
     end
   end
 
+  # @private
   class EventPayloadSendTask
     def run(sdk_key, config, client, payload, formatter)
       events_out = formatter.make_output_events(payload.events, payload.summary)
@@ -327,6 +342,7 @@ def run(sdk_key, config, client, payload, formatter)
     end
   end
 
+  # @private
   class EventOutputFormatter
     def initialize(config)
       @inline_users = config.inline_users_in_events
diff --git a/lib/ldclient-rb/expiring_cache.rb b/lib/ldclient-rb/expiring_cache.rb
index 6d8c48f8..fa6051c9 100644
--- a/lib/ldclient-rb/expiring_cache.rb
+++ b/lib/ldclient-rb/expiring_cache.rb
@@ -6,6 +6,7 @@ module LaunchDarkly
   #   * made thread-safe
   #   * removed many unused methods
   #   * reading a key does not reset its expiration time, only writing
+  # @private
   class ExpiringCache
     def initialize(max_size, ttl)
       @max_size = max_size
diff --git a/lib/ldclient-rb/file_data_source.rb b/lib/ldclient-rb/file_data_source.rb
index da80f26a..7606c1d3 100644
--- a/lib/ldclient-rb/file_data_source.rb
+++ b/lib/ldclient-rb/file_data_source.rb
@@ -7,12 +7,15 @@ module LaunchDarkly
   # To avoid pulling in 'listen' and its transitive dependencies for people who aren't using the
   # file data source or who don't need auto-updating, we only enable auto-update if the 'listen'
   # gem has been provided by the host app.
+  # @private
   @@have_listen = false
   begin
     require 'listen'
     @@have_listen = true
   rescue LoadError
   end
+
+  # @private
   def self.have_listen?
     @@have_listen
   end
@@ -22,30 +25,32 @@ def self.have_listen?
   # used in a test environment, to operate using a predetermined feature flag state without an
   # actual LaunchDarkly connection.
   #
-  # To use this component, call `FileDataSource.factory`, and store its return value in the
-  # `update_processor_factory` property of your LaunchDarkly client configuration. In the options
+  # To use this component, call {FileDataSource#factory}, and store its return value in the
+  # {Config#data_source} property of your LaunchDarkly client configuration. In the options
   # to `factory`, set `paths` to the file path(s) of your data file(s):
   #
-  #     factory = FileDataSource.factory(paths: [ myFilePath ])
-  #     config = LaunchDarkly::Config.new(update_processor_factory: factory)
+  #     file_source = FileDataSource.factory(paths: [ myFilePath ])
+  #     config = LaunchDarkly::Config.new(data_source: file_source)
   #
   # This will cause the client not to connect to LaunchDarkly to get feature flags. The
   # client may still make network connections to send analytics events, unless you have disabled
-  # this with Config.send_events or Config.offline.
+  # this with {Config#send_events} or {Config#offline?}.
   #
   # Flag data files can be either JSON or YAML. They contain an object with three possible
   # properties:
   #
-  # - "flags": Feature flag definitions.
-  # - "flagValues": Simplified feature flags that contain only a value.
-  # - "segments": User segment definitions.
+  # - `flags`: Feature flag definitions.
+  # - `flagValues`: Simplified feature flags that contain only a value.
+  # - `segments`: User segment definitions.
   #
-  # The format of the data in "flags" and "segments" is defined by the LaunchDarkly application
+  # The format of the data in `flags` and `segments` is defined by the LaunchDarkly application
   # and is subject to change. Rather than trying to construct these objects yourself, it is simpler
   # to request existing flags directly from the LaunchDarkly server in JSON format, and use this
   # output as the starting point for your file. In Linux you would do this:
   #
-  #     curl -H "Authorization: {your sdk key}" https://app.launchdarkly.com/sdk/latest-all
+  # ```
+  # curl -H "Authorization: YOUR_SDK_KEY" https://app.launchdarkly.com/sdk/latest-all
+  # ```
   #
   # The output will look something like this (but with many more properties):
   #
@@ -108,14 +113,14 @@ class FileDataSource
   #   @option options [Float] :poll_interval The minimum interval, in seconds, between checks for
   #     file modifications - used only if auto_update is true, and if the native file-watching
   #     mechanism from 'listen' is not being used. The default value is 1 second.
+  # @return an object that can be stored in {Config#data_source}
   #
   def self.factory(options={})
-    return Proc.new do |sdk_key, config|
-      FileDataSourceImpl.new(config.feature_store, config.logger, options)
-    end
+    return lambda { |sdk_key, config| FileDataSourceImpl.new(config.feature_store, config.logger, options) }
   end
 end
 
+# @private
 class FileDataSourceImpl
   def initialize(feature_store, logger, options={})
     @feature_store = feature_store
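Editor's note: beyond the `factory` example embedded in the comments above, the `auto_update` and `poll_interval` options documented in this file can be combined like this (the file name is a placeholder; the `listen` gem is optional, with polling as the fallback):

```ruby
require "ldclient-rb"

file_source = LaunchDarkly::FileDataSource.factory(
  paths: ["flags.yml"],  # JSON or YAML files, as described above
  auto_update: true,     # reload when a file changes
  poll_interval: 1       # polling fallback if 'listen' is not installed
)

# send_events: false keeps the client fully offline from LaunchDarkly services
config = LaunchDarkly::Config.new(data_source: file_source, send_events: false)
client = LaunchDarkly::LDClient.new("fake-sdk-key", config)
```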
diff --git a/lib/ldclient-rb/flags_state.rb b/lib/ldclient-rb/flags_state.rb
index b761149c..4efe1404 100644
--- a/lib/ldclient-rb/flags_state.rb
+++ b/lib/ldclient-rb/flags_state.rb
@@ -3,8 +3,8 @@ module LaunchDarkly
   #
   # A snapshot of the state of all feature flags with regard to a specific user, generated by
-  # calling the client's all_flags_state method. Serializing this object to JSON using
-  # JSON.generate (or the to_json method) will produce the appropriate data structure for
+  # calling {LDClient#all_flags_state}. Serializing this object to JSON using
+  # `JSON.generate` (or the `to_json` method) will produce the appropriate data structure for
   # bootstrapping the LaunchDarkly JavaScript client.
   #
   class FeatureFlagsState
     def initialize(valid)
@@ -15,6 +15,7 @@ def initialize(valid)
     end
 
     # Used internally to build the state map.
+    # @private
     def add_flag(flag, value, variation, reason = nil, details_only_if_tracked = false)
       key = flag[:key]
       @flag_values[key] = value
diff --git a/lib/ldclient-rb/impl.rb b/lib/ldclient-rb/impl.rb
new file mode 100644
index 00000000..b0d63ebe
--- /dev/null
+++ b/lib/ldclient-rb/impl.rb
@@ -0,0 +1,13 @@
+
+module LaunchDarkly
+  #
+  # Internal implementation classes. Everything in this module should be considered unsupported
+  # and subject to change.
+  #
+  # @since 5.5.0
+  # @private
+  #
+  module Impl
+    # code is in ldclient-rb/impl/
+  end
+end
diff --git a/lib/ldclient-rb/impl/integrations/consul_impl.rb b/lib/ldclient-rb/impl/integrations/consul_impl.rb
new file mode 100644
index 00000000..10c16dbc
--- /dev/null
+++ b/lib/ldclient-rb/impl/integrations/consul_impl.rb
@@ -0,0 +1,158 @@
+require "json"
+
+module LaunchDarkly
+  module Impl
+    module Integrations
+      module Consul
+        #
+        # Internal implementation of the Consul feature store, intended to be used with CachingStoreWrapper.
+        #
+        class ConsulFeatureStoreCore
+          begin
+            require "diplomat"
+            CONSUL_ENABLED = true
+          rescue ScriptError, StandardError
+            CONSUL_ENABLED = false
+          end
+
+          def initialize(opts)
+            if !CONSUL_ENABLED
+              raise RuntimeError.new("can't use Consul feature store without the 'diplomat' gem")
+            end
+
+            @prefix = (opts[:prefix] || LaunchDarkly::Integrations::Consul.default_prefix) + '/'
+            @logger = opts[:logger] || Config.default_logger
+            Diplomat.configuration = opts[:consul_config] if !opts[:consul_config].nil?
+            Diplomat.configuration.url = opts[:url] if !opts[:url].nil?
+            @logger.info("ConsulFeatureStore: using Consul host at #{Diplomat.configuration.url}")
+          end
+
+          def init_internal(all_data)
+            # Start by reading the existing keys; we will later delete any of these that weren't in all_data.
+            unused_old_keys = Set.new
+            keys = Diplomat::Kv.get(@prefix, { keys: true, recurse: true }, :return)
+            unused_old_keys.merge(keys) if keys != ""
+
+            ops = []
+            num_items = 0
+
+            # Insert or update every provided item
+            all_data.each do |kind, items|
+              items.values.each do |item|
+                value = item.to_json
+                key = item_key(kind, item[:key])
+                ops.push({ 'KV' => { 'Verb' => 'set', 'Key' => key, 'Value' => value } })
+                unused_old_keys.delete(key)
+                num_items = num_items + 1
+              end
+            end
+
+            # Now delete any previously existing items whose keys were not in the current data
+            unused_old_keys.each do |key|
+              ops.push({ 'KV' => { 'Verb' => 'delete', 'Key' => key } })
+            end
+
+            # Now set the special key that we check in initialized_internal?
+            ops.push({ 'KV' => { 'Verb' => 'set', 'Key' => inited_key, 'Value' => '' } })
+
+            ConsulUtil.batch_operations(ops)
+
+            @logger.info { "Initialized database with #{num_items} items" }
+          end
+
+          def get_internal(kind, key)
+            value = Diplomat::Kv.get(item_key(kind, key), {}, :return)  # :return means "don't throw an error if not found"
+            (value.nil? || value == "") ? nil : JSON.parse(value, symbolize_names: true)
+          end
+
+          def get_all_internal(kind)
+            items_out = {}
+            results = Diplomat::Kv.get(kind_key(kind), { recurse: true }, :return)
+            (results == "" ? [] : results).each do |result|
+              value = result[:value]
+              if !value.nil?
+                item = JSON.parse(value, symbolize_names: true)
+                items_out[item[:key].to_sym] = item
+              end
+            end
+            items_out
+          end
+
+          def upsert_internal(kind, new_item)
+            key = item_key(kind, new_item[:key])
+            json = new_item.to_json
+
+            # We will potentially keep retrying indefinitely until someone's write succeeds
+            while true
+              old_value = Diplomat::Kv.get(key, { decode_values: true }, :return)
+              if old_value.nil? || old_value == ""
+                mod_index = 0
+              else
+                old_item = JSON.parse(old_value[0]["Value"], symbolize_names: true)
+                # Check whether the item is stale. If so, don't do the update (and return the existing item to
+                # FeatureStoreWrapper so it can be cached)
+                if old_item[:version] >= new_item[:version]
+                  return old_item
+                end
+                mod_index = old_value[0]["ModifyIndex"]
+              end
+
+              # Otherwise, try to write. We will do a compare-and-set operation, so the write will only succeed if
+              # the key's ModifyIndex is still equal to the previous value. If the previous ModifyIndex was zero,
+              # it means the key did not previously exist and the write will only succeed if it still doesn't exist.
+              success = Diplomat::Kv.put(key, json, cas: mod_index)
+              return new_item if success
+
+              # If we failed, retry the whole shebang
+              @logger.debug { "Concurrent modification detected, retrying" }
+            end
+          end
+
+          def initialized_internal?
+            # Unfortunately we need to use exceptions here, instead of the :return parameter, because with
+            # :return there's no way to distinguish between a missing value and an empty string.
+            begin
+              Diplomat::Kv.get(inited_key, {})
+              true
+            rescue Diplomat::KeyNotFound
+              false
+            end
+          end
+
+          def stop
+            # There's no Consul client instance to dispose of
+          end
+
+          private
+
+          def item_key(kind, key)
+            kind_key(kind) + key.to_s
+          end
+
+          def kind_key(kind)
+            @prefix + kind[:namespace] + '/'
+          end
+
+          def inited_key
+            @prefix + '$inited'
+          end
+        end
+
+        class ConsulUtil
+          #
+          # Submits as many transactions as necessary to submit all of the given operations.
+          # The ops array is consumed.
+          #
+          def self.batch_operations(ops)
+            batch_size = 64  # Consul can only do this many at a time
+            while true
+              chunk = ops.shift(batch_size)
+              break if chunk.empty?
+              Diplomat::Kv.txn(chunk)
+            end
+          end
+        end
+      end
+    end
+  end
+end
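Editor's note: this core is not used directly; per the README section earlier in this diff, it is reached through the `new_feature_store` factory in `LaunchDarkly::Integrations::Consul`. A sketch, assuming the factory forwards its options to the core (the `url` and `prefix` names come from the constructor above; the values are placeholders):

```ruby
store = LaunchDarkly::Integrations::Consul.new_feature_store(
  url: "http://localhost:8500",  # becomes Diplomat.configuration.url
  prefix: "launchdarkly"         # prepended to every KV key
)
config = LaunchDarkly::Config.new(feature_store: store)
```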
diff --git a/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb b/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb
new file mode 100644
index 00000000..a76fae52
--- /dev/null
+++ b/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb
@@ -0,0 +1,228 @@
+require "json"
+
+module LaunchDarkly
+  module Impl
+    module Integrations
+      module DynamoDB
+        #
+        # Internal implementation of the DynamoDB feature store, intended to be used with CachingStoreWrapper.
+        #
+        class DynamoDBFeatureStoreCore
+          begin
+            require "aws-sdk-dynamodb"
+            AWS_SDK_ENABLED = true
+          rescue ScriptError, StandardError
+            begin
+              require "aws-sdk"
+              AWS_SDK_ENABLED = true
+            rescue ScriptError, StandardError
+              AWS_SDK_ENABLED = false
+            end
+          end
+
+          PARTITION_KEY = "namespace"
+          SORT_KEY = "key"
+
+          VERSION_ATTRIBUTE = "version"
+          ITEM_JSON_ATTRIBUTE = "item"
+
+          def initialize(table_name, opts)
+            if !AWS_SDK_ENABLED
+              raise RuntimeError.new("can't use DynamoDB feature store without the aws-sdk or aws-sdk-dynamodb gem")
+            end
+
+            @table_name = table_name
+            @prefix = opts[:prefix]
+            @logger = opts[:logger] || Config.default_logger
+
+            if !opts[:existing_client].nil?
+              @client = opts[:existing_client]
+            else
+              @client = Aws::DynamoDB::Client.new(opts[:dynamodb_opts] || {})
+            end
+
+            @logger.info("DynamoDBFeatureStore: using DynamoDB table \"#{table_name}\"")
+          end
+
+          def init_internal(all_data)
+            # Start by reading the existing keys; we will later delete any of these that weren't in all_data.
+            unused_old_keys = read_existing_keys(all_data.keys)
+
+            requests = []
+            num_items = 0
+
+            # Insert or update every provided item
+            all_data.each do |kind, items|
+              items.values.each do |item|
+                requests.push({ put_request: { item: marshal_item(kind, item) } })
+                unused_old_keys.delete([ namespace_for_kind(kind), item[:key] ])
+                num_items = num_items + 1
+              end
+            end
+
+            # Now delete any previously existing items whose keys were not in the current data
+            unused_old_keys.each do |tuple|
+              del_item = make_keys_hash(tuple[0], tuple[1])
+              requests.push({ delete_request: { key: del_item } })
+            end
+
+            # Now set the special key that we check in initialized_internal?
+            inited_item = make_keys_hash(inited_key, inited_key)
+            requests.push({ put_request: { item: inited_item } })
+
+            DynamoDBUtil.batch_write_requests(@client, @table_name, requests)
+
+            @logger.info { "Initialized table #{@table_name} with #{num_items} items" }
+          end
+
+          def get_internal(kind, key)
+            resp = get_item_by_keys(namespace_for_kind(kind), key)
+            unmarshal_item(resp.item)
+          end
+
+          def get_all_internal(kind)
+            items_out = {}
+            req = make_query_for_kind(kind)
+            while true
+              resp = @client.query(req)
+              resp.items.each do |item|
+                item_out = unmarshal_item(item)
+                items_out[item_out[:key].to_sym] = item_out
+              end
+              break if resp.last_evaluated_key.nil? || resp.last_evaluated_key.length == 0
+              req.exclusive_start_key = resp.last_evaluated_key
+            end
+            items_out
+          end
+
+          def upsert_internal(kind, new_item)
+            encoded_item = marshal_item(kind, new_item)
+            begin
+              @client.put_item({
+                table_name: @table_name,
+                item: encoded_item,
+                condition_expression: "attribute_not_exists(#namespace) or attribute_not_exists(#key) or :version > #version",
+                expression_attribute_names: {
+                  "#namespace" => PARTITION_KEY,
+                  "#key" => SORT_KEY,
+                  "#version" => VERSION_ATTRIBUTE
+                },
+                expression_attribute_values: {
+                  ":version" => new_item[:version]
+                }
+              })
+              new_item
+            rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException
+              # The item was not updated because there's a newer item in the database.
+              # We must now read the item that's in the database and return it, so CachingStoreWrapper can cache it.
+              get_internal(kind, new_item[:key])
+            end
+          end
+
+          def initialized_internal?
+            resp = get_item_by_keys(inited_key, inited_key)
+            !resp.item.nil? && resp.item.length > 0
+          end
+
+          def stop
+            # AWS client doesn't seem to have a close method
+          end
+
+          private
+
+          def prefixed_namespace(base_str)
+            (@prefix.nil? || @prefix == "") ? base_str : "#{@prefix}:#{base_str}"
+          end
+
+          def namespace_for_kind(kind)
+            prefixed_namespace(kind[:namespace])
+          end
+
+          def inited_key
+            prefixed_namespace("$inited")
+          end
+
+          def make_keys_hash(namespace, key)
+            {
+              PARTITION_KEY => namespace,
+              SORT_KEY => key
+            }
+          end
+
+          def make_query_for_kind(kind)
+            {
+              table_name: @table_name,
+              consistent_read: true,
+              key_conditions: {
+                PARTITION_KEY => {
+                  comparison_operator: "EQ",
+                  attribute_value_list: [ namespace_for_kind(kind) ]
+                }
+              }
+            }
+          end
+
+          def get_item_by_keys(namespace, key)
+            @client.get_item({
+              table_name: @table_name,
+              key: make_keys_hash(namespace, key)
+            })
+          end
+
+          def read_existing_keys(kinds)
+            keys = Set.new
+            kinds.each do |kind|
+              req = make_query_for_kind(kind).merge({
+                projection_expression: "#namespace, #key",
+                expression_attribute_names: {
+                  "#namespace" => PARTITION_KEY,
+                  "#key" => SORT_KEY
+                }
+              })
+              while true
+                resp = @client.query(req)
+                resp.items.each do |item|
+                  namespace = item[PARTITION_KEY]
+                  key = item[SORT_KEY]
+                  keys.add([ namespace, key ])
+                end
+                break if resp.last_evaluated_key.nil? || resp.last_evaluated_key.length == 0
+                req.exclusive_start_key = resp.last_evaluated_key
+              end
+            end
+            keys
+          end
+
+          def marshal_item(kind, item)
+            make_keys_hash(namespace_for_kind(kind), item[:key]).merge({
+              VERSION_ATTRIBUTE => item[:version],
+              ITEM_JSON_ATTRIBUTE => item.to_json
+            })
+          end
+
+          def unmarshal_item(item)
+            return nil if item.nil? || item.length == 0
+            json_attr = item[ITEM_JSON_ATTRIBUTE]
+            raise RuntimeError.new("DynamoDB map did not contain expected item string") if json_attr.nil?
+            JSON.parse(json_attr, symbolize_names: true)
+          end
+        end
+
+        class DynamoDBUtil
+          #
+          # Calls client.batch_write_item as many times as necessary to submit all of the given requests.
+          # The requests array is consumed.
+          #
+          def self.batch_write_requests(client, table, requests)
+            batch_size = 25
+            while true
+              chunk = requests.shift(batch_size)
+              break if chunk.empty?
+              client.batch_write_item({ request_items: { table => chunk } })
+            end
+          end
+        end
+      end
+    end
+  end
+end
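Editor's note: one operational detail implied by the core above is that it reads and writes a table keyed by `PARTITION_KEY` ("namespace") and `SORT_KEY` ("key") but never creates that table itself. A sketch of the one-time setup it assumes (the table name, region, and throughput values are placeholders):

```ruby
require "aws-sdk-dynamodb"

client = Aws::DynamoDB::Client.new(region: "us-east-1")
client.create_table(
  table_name: "launchdarkly-features",
  attribute_definitions: [
    { attribute_name: "namespace", attribute_type: "S" },  # PARTITION_KEY
    { attribute_name: "key", attribute_type: "S" }         # SORT_KEY
  ],
  key_schema: [
    { attribute_name: "namespace", key_type: "HASH" },
    { attribute_name: "key", key_type: "RANGE" }
  ],
  provisioned_throughput: { read_capacity_units: 1, write_capacity_units: 1 }
)
```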
diff --git a/lib/ldclient-rb/impl/integrations/redis_impl.rb b/lib/ldclient-rb/impl/integrations/redis_impl.rb
new file mode 100644
index 00000000..107340f8
--- /dev/null
+++ b/lib/ldclient-rb/impl/integrations/redis_impl.rb
@@ -0,0 +1,155 @@
+require "concurrent/atomics"
+require "json"
+
+module LaunchDarkly
+  module Impl
+    module Integrations
+      module Redis
+        #
+        # Internal implementation of the Redis feature store, intended to be used with CachingStoreWrapper.
+        #
+        class RedisFeatureStoreCore
+          begin
+            require "redis"
+            require "connection_pool"
+            REDIS_ENABLED = true
+          rescue ScriptError, StandardError
+            REDIS_ENABLED = false
+          end
+
+          def initialize(opts)
+            if !REDIS_ENABLED
+              raise RuntimeError.new("can't use Redis feature store because one of these gems is missing: redis, connection_pool")
+            end
+
+            @redis_opts = opts[:redis_opts] || Hash.new
+            if opts[:redis_url]
+              @redis_opts[:url] = opts[:redis_url]
+            end
+            if !@redis_opts.include?(:url)
+              @redis_opts[:url] = LaunchDarkly::Integrations::Redis::default_redis_url
+            end
+            max_connections = opts[:max_connections] || 16
+            @pool = opts[:pool] || ConnectionPool.new(size: max_connections) do
+              ::Redis.new(@redis_opts)
+            end
+            @prefix = opts[:prefix] || LaunchDarkly::Integrations::Redis::default_prefix
+            @logger = opts[:logger] || Config.default_logger
+            @test_hook = opts[:test_hook]  # used for unit tests, deliberately undocumented
+
+            @stopped = Concurrent::AtomicBoolean.new(false)
+
+            with_connection do |redis|
+              @logger.info("RedisFeatureStore: using Redis instance at #{redis.connection[:host]}:#{redis.connection[:port]} \
+and prefix: #{@prefix}")
+            end
+          end
+
+          def init_internal(all_data)
+            count = 0
+            with_connection do |redis|
+              redis.multi do |multi|
+                all_data.each do |kind, items|
+                  multi.del(items_key(kind))
+                  count = count + items.count
+                  items.each do |key, item|
+                    multi.hset(items_key(kind), key, item.to_json)
+                  end
+                end
+                multi.set(inited_key, inited_key)
+              end
+            end
+            @logger.info { "RedisFeatureStore: initialized with #{count} items" }
+          end
+
+          def get_internal(kind, key)
+            with_connection do |redis|
+              get_redis(redis, kind, key)
+            end
+          end
+
+          def get_all_internal(kind)
+            fs = {}
+            with_connection do |redis|
+              hashfs = redis.hgetall(items_key(kind))
+              hashfs.each do |k, json_item|
+                f = JSON.parse(json_item, symbolize_names: true)
+                fs[k.to_sym] = f
+              end
+            end
+            fs
+          end
+
+          def upsert_internal(kind, new_item)
+            base_key = items_key(kind)
+            key = new_item[:key]
+            try_again = true
+            final_item = new_item
+            while try_again
+              try_again = false
+              with_connection do |redis|
+                redis.watch(base_key) do
+                  old_item = get_redis(redis, kind, key)
+                  before_update_transaction(base_key, key)
+                  if old_item.nil? || old_item[:version] < new_item[:version]
+                    result = redis.multi do |multi|
+                      multi.hset(base_key, key, new_item.to_json)
+                    end
+                    if result.nil?
+                      @logger.debug { "RedisFeatureStore: concurrent modification detected, retrying" }
+                      try_again = true
+                    end
+                  else
+                    final_item = old_item
+                    action = new_item[:deleted] ? "delete" : "update"
+                    @logger.warn { "RedisFeatureStore: attempted to #{action} #{key} version: #{old_item[:version]} \
+in '#{kind[:namespace]}' with a version that is the same or older: #{new_item[:version]}" }
+                  end
+                  redis.unwatch
+                end
+              end
+            end
+            final_item
+          end
+
+          def initialized_internal?
+            with_connection { |redis| redis.exists(inited_key) }
+          end
+
+          def stop
+            if @stopped.make_true
+              @pool.shutdown { |redis| redis.close }
+            end
+          end
+
nil : JSON.parse(json_item, symbolize_names: true) + end + end + end + end + end +end diff --git a/lib/ldclient-rb/impl/store_client_wrapper.rb b/lib/ldclient-rb/impl/store_client_wrapper.rb new file mode 100644 index 00000000..f0948251 --- /dev/null +++ b/lib/ldclient-rb/impl/store_client_wrapper.rb @@ -0,0 +1,47 @@ +require "ldclient-rb/interfaces" +require "ldclient-rb/impl/store_data_set_sorter" + +module LaunchDarkly + module Impl + # + # Provides additional behavior that the client requires before or after feature store operations. + # Currently this just means sorting the data set for init(). In the future we may also use this + # to provide an update listener capability. + # + class FeatureStoreClientWrapper + include Interfaces::FeatureStore + + def initialize(store) + @store = store + end + + def init(all_data) + @store.init(FeatureStoreDataSetSorter.sort_all_collections(all_data)) + end + + def get(kind, key) + @store.get(kind, key) + end + + def all(kind) + @store.all(kind) + end + + def upsert(kind, item) + @store.upsert(kind, item) + end + + def delete(kind, key, version) + @store.delete(kind, key, version) + end + + def initialized? + @store.initialized? + end + + def stop + @store.stop + end + end + end +end diff --git a/lib/ldclient-rb/impl/store_data_set_sorter.rb b/lib/ldclient-rb/impl/store_data_set_sorter.rb new file mode 100644 index 00000000..4454fe75 --- /dev/null +++ b/lib/ldclient-rb/impl/store_data_set_sorter.rb @@ -0,0 +1,55 @@ + +module LaunchDarkly + module Impl + # + # Implements a dependency graph ordering for data to be stored in a feature store. We must use this + # on every data set that will be passed to the feature store's init() method. + # + class FeatureStoreDataSetSorter + # + # Returns a copy of the input hash that has the following guarantees: the iteration order of the outer + # hash will be in ascending order by the VersionDataKind's :priority property (if any), and for each + # data kind that has a :get_dependency_keys function, the inner hash will have an iteration order + # where B is before A if A has a dependency on B. + # + # This implementation relies on the fact that hashes in Ruby have an iteration order that is the same + # as the insertion order. Also, due to the way we deserialize JSON received from LaunchDarkly, the + # keys in the inner hash will always be symbols. + # + def self.sort_all_collections(all_data) + outer_hash = {} + kinds = all_data.keys.sort_by { |k| + k[:priority].nil? ? k[:namespace].length : k[:priority] # arbitrary order if priority is unknown + } + kinds.each do |kind| + items = all_data[kind] + outer_hash[kind] = self.sort_collection(kind, items) + end + outer_hash + end + + def self.sort_collection(kind, input) + dependency_fn = kind[:get_dependency_keys] + return input if dependency_fn.nil? || input.empty? + remaining_items = input.clone + items_out = {} + while !remaining_items.empty? + # pick a random item that hasn't been updated yet + key, item = remaining_items.first + self.add_with_dependencies_first(item, dependency_fn, remaining_items, items_out) + end + items_out + end + + def self.add_with_dependencies_first(item, dependency_fn, remaining_items, items_out) + item_key = item[:key].to_sym + remaining_items.delete(item_key) # we won't need to visit this item again + dependency_fn.call(item).each do |dep_key| + dep_item = remaining_items[dep_key.to_sym] + self.add_with_dependencies_first(dep_item, dependency_fn, remaining_items, items_out) if !dep_item.nil? 
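# (Depth-first: every dependency reaches items_out before the item that depends
# on it, which produces the "B before A" ordering promised in the comment above.)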
+ end + items_out[item_key] = item + end + end + end +end diff --git a/lib/ldclient-rb/in_memory_store.rb b/lib/ldclient-rb/in_memory_store.rb index e3e85879..576d90c7 100644 --- a/lib/ldclient-rb/in_memory_store.rb +++ b/lib/ldclient-rb/in_memory_store.rb @@ -6,20 +6,31 @@ module LaunchDarkly # we add another storable data type in the future, as long as it follows the same pattern # (having "key", "version", and "deleted" properties), we only need to add a corresponding # constant here and the existing store should be able to handle it. + # + # The :priority and :get_dependency_keys properties are used by FeatureStoreDataSetSorter + # to ensure data consistency during non-atomic updates. + + # @private FEATURES = { - namespace: "features" + namespace: "features", + priority: 1, # that is, features should be stored after segments + get_dependency_keys: lambda { |flag| (flag[:prerequisites] || []).map { |p| p[:key] } } }.freeze + # @private SEGMENTS = { - namespace: "segments" + namespace: "segments", + priority: 0 }.freeze # # Default implementation of the LaunchDarkly client's feature store, using an in-memory - # cache. This object holds feature flags and related data received from the - # streaming API. + # cache. This object holds feature flags and related data received from LaunchDarkly. + # Database-backed implementations are available in {LaunchDarkly::Integrations}. # class InMemoryFeatureStore + include LaunchDarkly::Interfaces::FeatureStore + def initialize @items = Hash.new @lock = Concurrent::ReadWriteLock.new diff --git a/lib/ldclient-rb/integrations.rb b/lib/ldclient-rb/integrations.rb new file mode 100644 index 00000000..8c9f6249 --- /dev/null +++ b/lib/ldclient-rb/integrations.rb @@ -0,0 +1,55 @@ +require "ldclient-rb/integrations/consul" +require "ldclient-rb/integrations/dynamodb" +require "ldclient-rb/integrations/redis" +require "ldclient-rb/integrations/util/store_wrapper" + +module LaunchDarkly + # + # Tools for connecting the LaunchDarkly client to other software. + # + module Integrations + # + # Integration with [Consul](https://www.consul.io/). + # + # Note that in order to use this integration, you must first install the gem `diplomat`. + # + # @since 5.5.0 + # + module Consul + # code is in ldclient-rb/impl/integrations/consul_impl + end + + # + # Integration with [DynamoDB](https://aws.amazon.com/dynamodb/). + # + # Note that in order to use this integration, you must first install one of the AWS SDK gems: either + # `aws-sdk-dynamodb`, or the full `aws-sdk`. + # + # @since 5.5.0 + # + module DynamoDB + # code is in ldclient-rb/impl/integrations/dynamodb_impl + end + + # + # Integration with [Redis](https://redis.io/). + # + # Note that in order to use this integration, you must first install the `redis` and `connection_pool` + # gems. + # + # @since 5.5.0 + # + module Redis + # code is in ldclient-rb/impl/integrations/redis_impl + end + + # + # Support code that may be helpful in creating integrations. + # + # @since 5.5.0 + # + module Util + # code is in ldclient-rb/integrations/util/ + end + end +end diff --git a/lib/ldclient-rb/integrations/consul.rb b/lib/ldclient-rb/integrations/consul.rb new file mode 100644 index 00000000..2d46d813 --- /dev/null +++ b/lib/ldclient-rb/integrations/consul.rb @@ -0,0 +1,38 @@ +require "ldclient-rb/impl/integrations/consul_impl" +require "ldclient-rb/integrations/util/store_wrapper" + +module LaunchDarkly + module Integrations + module Consul + # + # Default value for the `prefix` option for {new_feature_store}.
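Stepping back to the kind metadata defined in `in_memory_store.rb` above: the `:get_dependency_keys` lambda is what lets `FeatureStoreDataSetSorter` place prerequisite flags ahead of the flags that reference them. A small illustration with hypothetical flag data:

```ruby
flag = { key: "checkout", version: 1, prerequisites: [{ key: "payments" }, { key: "login" }] }
LaunchDarkly::FEATURES[:get_dependency_keys].call(flag)  # => ["payments", "login"]
```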
+ # + # @return [String] the default key prefix + # + def self.default_prefix + 'launchdarkly' + end + + # + # Creates a Consul-backed persistent feature store. + # + # To use this method, you must first install the gem `diplomat`. Then, put the object returned by + # this method into the `feature_store` property of your client configuration ({LaunchDarkly::Config}). + # + # @param opts [Hash] the configuration options + # @option opts [Hash] :consul_config an instance of `Diplomat::Configuration` to replace the default + # Consul client configuration (note that this is exactly the same as modifying `Diplomat.configuration`) + # @option opts [String] :url shortcut for setting the `url` property of the Consul client configuration + # @option opts [String] :prefix namespace prefix to add to all keys used by LaunchDarkly + # @option opts [Logger] :logger a `Logger` instance; defaults to `Config.default_logger` + # @option opts [Integer] :expiration_seconds (15) expiration time for the in-memory cache, in seconds; 0 for no local caching + # @option opts [Integer] :capacity (1000) maximum number of items in the cache + # @return [LaunchDarkly::Interfaces::FeatureStore] a feature store object + # + def self.new_feature_store(opts, &block) + core = LaunchDarkly::Impl::Integrations::Consul::ConsulFeatureStoreCore.new(opts) + return LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts) + end + end + end +end diff --git a/lib/ldclient-rb/integrations/dynamodb.rb b/lib/ldclient-rb/integrations/dynamodb.rb new file mode 100644 index 00000000..ecd87fce --- /dev/null +++ b/lib/ldclient-rb/integrations/dynamodb.rb @@ -0,0 +1,47 @@ +require "ldclient-rb/impl/integrations/dynamodb_impl" +require "ldclient-rb/integrations/util/store_wrapper" + +module LaunchDarkly + module Integrations + module DynamoDB + # + # Creates a DynamoDB-backed persistent feature store. For more details about how and why you can + # use a persistent feature store, see the + # [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store). + # + # To use this method, you must first install one of the AWS SDK gems: either `aws-sdk-dynamodb`, or + # the full `aws-sdk`. Then, put the object returned by this method into the `feature_store` property + # of your client configuration ({LaunchDarkly::Config}). + # + # @example Configuring the feature store + # store = LaunchDarkly::Integrations::DynamoDB::new_feature_store("my-table-name") + # config = LaunchDarkly::Config.new(feature_store: store) + # client = LaunchDarkly::LDClient.new(my_sdk_key, config) + # + # Note that the specified table must already exist in DynamoDB. It must have a partition key called + # "namespace", and a sort key called "key" (both strings). The SDK does not create the table + # automatically because it has no way of knowing what additional properties (such as permissions + # and throughput) you would want it to have. + # + # By default, the DynamoDB client will try to get your AWS credentials and region name from + # environment variables and/or local configuration files, as described in the AWS SDK documentation. + # You can also specify any supported AWS SDK options in `dynamodb_opts`-- or, provide an + # already-configured DynamoDB client in `existing_client`. 
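A hedged sketch of the `existing_client` path just described (the region and table name are hypothetical):

```ruby
require "aws-sdk-dynamodb"

# Reuse a client that the application has already configured elsewhere.
ddb = Aws::DynamoDB::Client.new(region: "us-east-1")
store = LaunchDarkly::Integrations::DynamoDB.new_feature_store("my-table-name", existing_client: ddb)
```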
+ # + # @param table_name [String] name of an existing DynamoDB table + # @param opts [Hash] the configuration options + # @option opts [Hash] :dynamodb_opts options to pass to the DynamoDB client constructor (ignored if you specify `:existing_client`) + # @option opts [Object] :existing_client an already-constructed DynamoDB client for the feature store to use + # @option opts [String] :prefix namespace prefix to add to all keys used by LaunchDarkly + # @option opts [Logger] :logger a `Logger` instance; defaults to `Config.default_logger` + # @option opts [Integer] :expiration_seconds (15) expiration time for the in-memory cache, in seconds; 0 for no local caching + # @option opts [Integer] :capacity (1000) maximum number of items in the cache + # @return [LaunchDarkly::Interfaces::FeatureStore] a feature store object + # + def self.new_feature_store(table_name, opts) + core = LaunchDarkly::Impl::Integrations::DynamoDB::DynamoDBFeatureStoreCore.new(table_name, opts) + return LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts) + end + end + end +end diff --git a/lib/ldclient-rb/integrations/redis.rb b/lib/ldclient-rb/integrations/redis.rb new file mode 100644 index 00000000..34509181 --- /dev/null +++ b/lib/ldclient-rb/integrations/redis.rb @@ -0,0 +1,55 @@ +require "ldclient-rb/redis_store" # eventually we will just refer to impl/integrations/redis_impl directly + +module LaunchDarkly + module Integrations + module Redis + # + # Default value for the `redis_url` option for {new_feature_store}. This points to an instance of + # Redis running at `localhost` with its default port. + # + # @return [String] the default Redis URL + # + def self.default_redis_url + 'redis://localhost:6379/0' + end + + # + # Default value for the `prefix` option for {new_feature_store}. + # + # @return [String] the default key prefix + # + def self.default_prefix + 'launchdarkly' + end + + # + # Creates a Redis-backed persistent feature store. For more details about how and why you can + # use a persistent feature store, see the + # [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store). + # + # To use this method, you must first have the `redis` and `connection_pool` gems installed. Then, + # put the object returned by this method into the `feature_store` property of your + # client configuration.
+ # + # @example Configuring the feature store + # store = LaunchDarkly::Integrations::Redis::new_feature_store(redis_url: "redis://my-server") + # config = LaunchDarkly::Config.new(feature_store: store) + # client = LaunchDarkly::LDClient.new(my_sdk_key, config) + # + # @param opts [Hash] the configuration options + # @option opts [String] :redis_url (default_redis_url) URL of the Redis instance (shortcut for omitting `redis_opts`) + # @option opts [Hash] :redis_opts options to pass to the Redis constructor (if you want to specify more than just `redis_url`) + # @option opts [String] :prefix (default_prefix) namespace prefix to add to all hash keys used by LaunchDarkly + # @option opts [Logger] :logger a `Logger` instance; defaults to `Config.default_logger` + # @option opts [Integer] :max_connections size of the Redis connection pool + # @option opts [Integer] :expiration_seconds (15) expiration time for the in-memory cache, in seconds; 0 for no local caching + # @option opts [Integer] :capacity (1000) maximum number of items in the cache + # @option opts [Object] :pool custom connection pool, if desired + # @return [LaunchDarkly::Interfaces::FeatureStore] a feature store object + # + def self.new_feature_store(opts) + return RedisFeatureStore.new(opts) + end + end + end +end diff --git a/lib/ldclient-rb/integrations/util/store_wrapper.rb b/lib/ldclient-rb/integrations/util/store_wrapper.rb new file mode 100644 index 00000000..eef22d5e --- /dev/null +++ b/lib/ldclient-rb/integrations/util/store_wrapper.rb @@ -0,0 +1,230 @@ +require "concurrent/atomics" + +require "ldclient-rb/expiring_cache" + +module LaunchDarkly + module Integrations + module Util + # + # CachingStoreWrapper is a partial implementation of the {LaunchDarkly::Interfaces::FeatureStore} + # pattern that delegates part of its behavior to another object, while providing optional caching + # behavior and other logic that would otherwise be repeated in every feature store implementation. + # This makes it easier to create new database integrations by implementing only the database-specific + # logic. + # + # The mixin {FeatureStoreCore} describes the methods that need to be supported by the inner + # implementation object. + # + class CachingStoreWrapper + include LaunchDarkly::Interfaces::FeatureStore + + # + # Creates a new store wrapper instance. + # + # @param core [Object] an object that implements the {FeatureStoreCore} methods + # @param opts [Hash] a hash that may include cache-related options; all others will be ignored + # @option opts [Float] :expiration_seconds (15) cache TTL; zero means no caching + # @option opts [Integer] :capacity (1000) maximum number of items in the cache + # + def initialize(core, opts) + @core = core + + expiration_seconds = opts[:expiration_seconds] || 15 + if expiration_seconds > 0 + capacity = opts[:capacity] || 1000 + @cache = ExpiringCache.new(capacity, expiration_seconds) + else + @cache = nil + end + + @inited = Concurrent::AtomicBoolean.new(false) + end + + def init(all_data) + @core.init_internal(all_data) + @inited.make_true + + if !@cache.nil? + @cache.clear + all_data.each do |kind, items| + @cache[kind] = items_if_not_deleted(items) + items.each do |key, item| + @cache[item_cache_key(kind, key)] = [item] + end + end + end + end + + def get(kind, key) + if !@cache.nil? + cache_key = item_cache_key(kind, key) + cached = @cache[cache_key] # note, item entries in the cache are wrapped in an array so we can cache nil values + return item_if_not_deleted(cached[0]) if !cached.nil?
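# (An absent cache entry means "never looked up"; a cached [nil] means "looked up
# and known to be missing", so only a true miss falls through to the core store.)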
+ end + + item = @core.get_internal(kind, key) + + if !@cache.nil? + @cache[cache_key] = [item] + end + + item_if_not_deleted(item) + end + + def all(kind) + if !@cache.nil? + items = @cache[all_cache_key(kind)] + return items if !items.nil? + end + + items = items_if_not_deleted(@core.get_all_internal(kind)) + @cache[all_cache_key(kind)] = items if !@cache.nil? + items + end + + def upsert(kind, item) + new_state = @core.upsert_internal(kind, item) + + if !@cache.nil? + @cache[item_cache_key(kind, item[:key])] = [new_state] + @cache.delete(all_cache_key(kind)) + end + end + + def delete(kind, key, version) + upsert(kind, { key: key, version: version, deleted: true }) + end + + def initialized? + return true if @inited.value + + if @cache.nil? + result = @core.initialized_internal? + else + result = @cache[inited_cache_key] + if result.nil? + result = @core.initialized_internal? + @cache[inited_cache_key] = result + end + end + + @inited.make_true if result + result + end + + def stop + @core.stop + end + + private + + # We use just one cache for 3 kinds of objects. Individual entities use a key like 'features:my-flag'. + def item_cache_key(kind, key) + kind[:namespace] + ":" + key.to_s + end + + # The result of a call to get_all_internal is cached using the "kind" object as a key. + def all_cache_key(kind) + kind + end + + # The result of initialized_internal? is cached using this key. + def inited_cache_key + "$inited" + end + + def item_if_not_deleted(item) + (item.nil? || item[:deleted]) ? nil : item + end + + def items_if_not_deleted(items) + items.select { |key, item| !item[:deleted] } + end + end + + # + # This module describes the methods that you must implement on your own object in order to + # use {CachingStoreWrapper}. + # + module FeatureStoreCore + # + # Initializes the store. This is the same as {LaunchDarkly::Interfaces::FeatureStore#init}, + # but the wrapper will take care of updating the cache if caching is enabled. + # + # If possible, the store should update the entire data set atomically. If that is not possible, + # it should iterate through the outer hash and then the inner hash using the existing iteration + # order of those hashes (the SDK will ensure that the items were inserted into the hashes in + # the correct order), storing each item, and then delete any leftover items at the very end. + # + # @param all_data [Hash] a hash where each key is one of the data kind objects, and each + # value is in turn a hash of string keys to entities + # @return [void] + # + def init_internal(all_data) + end + + # + # Retrieves a single entity. This is the same as {LaunchDarkly::Interfaces::FeatureStore#get} + # except that 1. the wrapper will take care of filtering out deleted entities by checking the + # `:deleted` property, so you can just return exactly what was in the data store, and 2. the + # wrapper will take care of checking and updating the cache if caching is enabled. + # + # @param kind [Object] the kind of entity to get + # @param key [String] the unique key of the entity to get + # @return [Hash] the entity; nil if the key was not found + # + def get_internal(kind, key) + end + + # + # Retrieves all entities of the specified kind. This is the same as {LaunchDarkly::Interfaces::FeatureStore#all} + # except that 1. the wrapper will take care of filtering out deleted entities by checking the + # `:deleted` property, so you can just return exactly what was in the data store, and 2. 
the + # wrapper will take care of checking and updating the cache if caching is enabled. + # + # @param kind [Object] the kind of entity to get + # @return [Hash] a hash where each key is the entity's `:key` property and each value + # is the entity + # + def get_all_internal(kind) + end + + # + # Attempts to add or update an entity. This is the same as {LaunchDarkly::Interfaces::FeatureStore#upsert} + # except that 1. the wrapper will take care of updating the cache if caching is enabled, and 2. + # the method is expected to return the final state of the entity (i.e. either the `item` + # parameter if the update succeeded, or the previously existing entity in the store if the + # update failed; this is used for the caching logic). + # + # Note that FeatureStoreCore does not have a `delete` method. This is because {CachingStoreWrapper} + # implements `delete` by simply calling `upsert` with an item whose `:deleted` property is true. + # + # @param kind [Object] the kind of entity to add or update + # @param item [Hash] the entity to add or update + # @return [Hash] the entity as it now exists in the store after the update + # + def upsert_internal(kind, item) + end + + # + # Checks whether this store has been initialized. This is the same as + # {LaunchDarkly::Interfaces::FeatureStore#initialized?} except that there is less of a concern + # for efficiency, because the wrapper will use caching and memoization in order to call the method + # as little as possible. + # + # @return [Boolean] true if the store is in an initialized state + # + def initialized_internal? + end + + # + # Performs any necessary cleanup to shut down the store when the client is being shut down. + # + # @return [void] + # + def stop + end + end + end + end +end diff --git a/lib/ldclient-rb/interfaces.rb b/lib/ldclient-rb/interfaces.rb new file mode 100644 index 00000000..d2a9f862 --- /dev/null +++ b/lib/ldclient-rb/interfaces.rb @@ -0,0 +1,153 @@ + +module LaunchDarkly + # + # Mixins that define the required methods of various pluggable components used by the client. + # + module Interfaces + # + # Mixin that defines the required methods of a feature store implementation. The LaunchDarkly + # client uses the feature store to persist feature flags and related objects received from + # the LaunchDarkly service. Implementations must support concurrent access and updates. + # For more about how feature stores can be used, see: + # [Using a persistent feature store](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store). + # + # An entity that can be stored in a feature store is a hash that can be converted to and from + # JSON, and that has at a minimum the following properties: `:key`, a string that is unique + # among entities of the same kind; `:version`, an integer that is higher for newer data; + # `:deleted`, a boolean (optional, defaults to false) that if true means this is a + # placeholder for a deleted entity. + # + # To represent the different kinds of objects that can be stored, such as feature flags and + # segments, the SDK will provide a "kind" object; this is a hash with a single property, + # `:namespace`, which is a short string unique to that kind. This string can be used as a + # collection name or a key prefix. + # + # The default implementation is {LaunchDarkly::InMemoryFeatureStore}. Several implementations + # that use databases can be found in {LaunchDarkly::Integrations}. 
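To make the two shapes described above concrete, a minimal sketch (values are hypothetical; real flag entities carry many more properties):

```ruby
# An entity as the store sees it:
flag = { key: "my-flag", version: 3, deleted: false }

# A "kind" object identifying the collection the entity belongs to:
features_kind = { namespace: "features" }
```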
If you want to write a new + # implementation, see {LaunchDarkly::Integrations::Util} for tools that can make this task + # simpler. + # + module FeatureStore + # + # Initializes (or re-initializes) the store with the specified set of entities. Any + # existing entries will be removed. Implementations can assume that this data set is up to + # date-- there is no need to perform individual version comparisons between the existing + # objects and the supplied features. + # + # If possible, the store should update the entire data set atomically. If that is not possible, + # it should iterate through the outer hash and then the inner hash using the existing iteration + # order of those hashes (the SDK will ensure that the items were inserted into the hashes in + # the correct order), storing each item, and then delete any leftover items at the very end. + # + # @param all_data [Hash] a hash where each key is one of the data kind objects, and each + # value is in turn a hash of string keys to entities + # @return [void] + # + def init(all_data) + end + + # + # Returns the entity to which the specified key is mapped, if any. + # + # @param kind [Object] the kind of entity to get + # @param key [String] the unique key of the entity to get + # @return [Hash] the entity; nil if the key was not found, or if the stored entity's + # `:deleted` property was true + # + def get(kind, key) + end + + # + # Returns all stored entities of the specified kind, not including deleted entities. + # + # @param kind [Object] the kind of entity to get + # @return [Hash] a hash where each key is the entity's `:key` property and each value + # is the entity + # + def all(kind) + end + + # + # Attempt to add an entity, or update an existing entity with the same key. An update + # should only succeed if the new item's `:version` is greater than the old one; + # otherwise, the method should do nothing. + # + # @param kind [Object] the kind of entity to add or update + # @param item [Hash] the entity to add or update + # @return [void] + # + def upsert(kind, item) + end + + # + # Attempt to delete an entity if it exists. Deletion should only succeed if the + # `version` parameter is greater than the existing entity's `:version`; otherwise, the + # method should do nothing. + # + # @param kind [Object] the kind of entity to delete + # @param key [String] the unique key of the entity + # @param version [Integer] the entity must have a lower version than this to be deleted + # @return [void] + # + def delete(kind, key, version) + end + + # + # Checks whether this store has been initialized. That means that `init` has been called + # either by this process, or (if the store can be shared) by another process. This + # method will be called frequently, so it should be efficient. You can assume that if it + # has returned true once, it can continue to return true, i.e. a store cannot become + # uninitialized again. + # + # @return [Boolean] true if the store is in an initialized state + # + def initialized? + end + + # + # Performs any necessary cleanup to shut down the store when the client is being shut down. + # + # @return [void] + # + def stop + end + end + + # + # Mixin that defines the required methods of a data source implementation. This is the + # component that delivers feature flag data from LaunchDarkly to the LDClient by putting + # the data in the {FeatureStore}. It is expected to run concurrently on its own thread. 
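Before moving on to the data source interface: the versioning contract that `upsert` and `delete` describe above can be sketched in a few lines. This is a hedged illustration only (a toy class, not the SDK's implementation, and not thread-safe; real stores must support concurrent access as noted above):

```ruby
# Toy store demonstrating the "newer version wins" rule and tombstone deletes.
class ToyFeatureStore
  def initialize
    @data = Hash.new { |h, k| h[k] = {} }
  end

  # Update only if the incoming version is newer; otherwise keep the old item.
  def upsert(kind, item)
    old = @data[kind[:namespace]][item[:key]]
    @data[kind[:namespace]][item[:key]] = item if old.nil? || old[:version] < item[:version]
  end

  # Deletion is an upsert of a tombstone, so the version check applies here too.
  def delete(kind, key, version)
    upsert(kind, { key: key, version: version, deleted: true })
  end

  # Reads hide tombstones, returning nil for deleted or missing items.
  def get(kind, key)
    item = @data[kind[:namespace]][key]
    item.nil? || item[:deleted] ? nil : item
  end
end

store = ToyFeatureStore.new
features = { namespace: "features" }
store.upsert(features, { key: "f1", version: 2 })
store.upsert(features, { key: "f1", version: 1 })  # ignored: older version
store.get(features, "f1")                          # => { key: "f1", version: 2 }
```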
+ # + # The client has its own standard implementation, which uses either a streaming connection or + # polling depending on your configuration. Normally you will not need to use another one + # except for testing purposes. {FileDataSource} provides one such test fixture. + # + module DataSource + # + # Checks whether the data source has finished initializing. Initialization is considered done + # once it has received one complete data set from LaunchDarkly. + # + # @return [Boolean] true if initialization is complete + # + def initialized? + end + + # + # Puts the data source into an active state. Normally this means it will make its first + # connection attempt to LaunchDarkly. If `start` has already been called, calling it again + # should simply return the same value as the first call. + # + # @return [Concurrent::Event] an Event which will be set once initialization is complete + # + def start + end + + # + # Puts the data source into an inactive state and releases all of its resources. + # This state should be considered permanent (`start` does not have to work after `stop`). + # + def stop + end + end + end +end diff --git a/lib/ldclient-rb/ldclient.rb b/lib/ldclient-rb/ldclient.rb index f8a75780..a5799700 100644 --- a/lib/ldclient-rb/ldclient.rb +++ b/lib/ldclient-rb/ldclient.rb @@ -1,3 +1,4 @@ +require "ldclient-rb/impl/store_client_wrapper" require "concurrent/atomics" require "digest/sha1" require "logger" @@ -10,7 +11,6 @@ module LaunchDarkly # A client for LaunchDarkly. Client instances are thread-safe. Users # should create a single client instance for the lifetime of the application. # - # class LDClient include Evaluation # @@ -18,15 +18,28 @@ class LDClient # configuration parameter can also be supplied to specify advanced options, # but for most use cases, the default configuration is appropriate. # + # The client will immediately attempt to connect to LaunchDarkly and retrieve + # your feature flag data. If it cannot successfully do so within the time limit + # specified by `wait_for_sec`, the constructor will return a client that is in + # an uninitialized state. See {#initialized?} for more details. # # @param sdk_key [String] the SDK key for your LaunchDarkly account # @param config [Config] an optional client configuration object + # @param wait_for_sec [Float] maximum time (in seconds) to wait for initialization # # @return [LDClient] The LaunchDarkly client instance + # def initialize(sdk_key, config = Config.default, wait_for_sec = 5) @sdk_key = sdk_key - @config = config - @store = config.feature_store + + # We need to wrap the feature store object with a FeatureStoreClientWrapper in order to add + # some necessary logic around updates. Unfortunately, we have code elsewhere that accesses + # the feature store through the Config object, so we need to make a new Config that uses + # the wrapped store. + @store = Impl::FeatureStoreClientWrapper.new(config.feature_store) + updated_config = config.clone + updated_config.instance_variable_set(:@feature_store, @store) + @config = updated_config if @config.offline? || !@config.send_events @event_processor = NullEventProcessor.new @@ -39,149 +52,193 @@ def initialize(sdk_key, config = Config.default, wait_for_sec = 5) return # requestor and update processor are not used in this mode end - if @config.update_processor - @update_processor = @config.update_processor + data_source_or_factory = @config.data_source || self.method(:create_default_data_source) + if data_source_or_factory.respond_to?
:call + @data_source = data_source_or_factory.call(sdk_key, @config) else - factory = @config.update_processor_factory || self.method(:create_default_update_processor) - @update_processor = factory.call(sdk_key, config) + @data_source = data_source_or_factory end - ready = @update_processor.start + ready = @data_source.start if wait_for_sec > 0 ok = ready.wait(wait_for_sec) if !ok @config.logger.error { "[LDClient] Timeout encountered waiting for LaunchDarkly client initialization" } - elsif !@update_processor.initialized? + elsif !@data_source.initialized? @config.logger.error { "[LDClient] LaunchDarkly client initialization failed" } end end end + # + # Tells the client that all pending analytics events should be delivered as soon as possible. + # + # When the LaunchDarkly client generates analytics events (from {#variation}, {#variation_detail}, + # {#identify}, or {#track}), they are queued on a worker thread. The event thread normally + # sends all queued events to LaunchDarkly at regular intervals, controlled by the + # {Config#flush_interval} option. Calling `flush` triggers a send without waiting for the + # next interval. + # + # Flushing is asynchronous, so this method will return before it is complete. However, if you + # call {#close}, events are guaranteed to be sent before that method returns. + # def flush @event_processor.flush end - def toggle?(key, user, default = False) + # + # @param key [String] the feature flag key + # @param user [Hash] the user properties + # @param default [Boolean] (false) the value to use if the flag cannot be evaluated + # @return [Boolean] the flag value + # @deprecated Use {#variation} instead. + # + def toggle?(key, user, default = false) @config.logger.warn { "[LDClient] toggle? is deprecated. Use variation instead" } variation(key, user, default) end + # + # Creates a hash string that can be used by the JavaScript SDK to identify a user. + # For more information, see [Secure mode](https://docs.launchdarkly.com/docs/js-sdk-reference#section-secure-mode). + # + # @param user [Hash] the user properties + # @return [String] a hash string + # def secure_mode_hash(user) OpenSSL::HMAC.hexdigest("sha256", @sdk_key, user[:key].to_s) end - # Returns whether the client has been initialized and is ready to serve feature flag requests + # + # Returns whether the client has been initialized and is ready to serve feature flag requests. + # + # If this returns false, it means that the client did not succeed in connecting to + # LaunchDarkly within the time limit that you specified in the constructor. It could + # still succeed in connecting at a later time (on another thread), or it could have + # given up permanently (for instance, if your SDK key is invalid). In the meantime, + # any call to {#variation} or {#variation_detail} will behave as follows: + # + # 1. It will check whether the feature store already contains data (that is, you + # are using a database-backed store and it was populated by a previous run of this + # application). If so, it will use the last known feature flag data. + # + # 2. Failing that, it will return the value that you specified for the `default` + # parameter of {#variation} or {#variation_detail}. + # # @return [Boolean] true if the client has been initialized + # def initialized? - @config.offline? || @config.use_ldd? || @update_processor.initialized? + @config.offline? || @config.use_ldd? || @data_source.initialized? end # - # Determines the variation of a feature flag to present to a user. 
At a minimum, - the user hash should contain a +:key+ . + # + # @example Basic user hash - # {key: "user@example.com"} + # At a minimum, the user hash should contain a `:key`, which should be the unique + # identifier for your user (or, for an anonymous user, a session identifier or + # cookie). # - # For authenticated users, the +:key+ should be the unique identifier for - # your user. For anonymous users, the +:key+ should be a session identifier - # or cookie. In either case, the only requirement is that the key - # is unique to a user. + # Other supported user attributes include IP address, country code, and an arbitrary hash of + # custom attributes. For more about the supported user properties and how they work in + # LaunchDarkly, see [Targeting users](https://docs.launchdarkly.com/docs/targeting-users). + # + # The optional `:privateAttributeNames` user property allows you to specify a list of + # attribute names that should not be sent back to LaunchDarkly. + # [Private attributes](https://docs.launchdarkly.com/docs/private-user-attributes) + # can also be configured globally in {Config}. # - # You can also pass IP addresses and country codes in the user hash. + # @example Basic user hash + # {key: "my-user-id"} # # @example More complete user hash - # {key: "user@example.com", ip: "127.0.0.1", country: "US"} + # {key: "my-user-id", ip: "127.0.0.1", country: "US", custom: {customer_rank: 1000}} # - # The user hash can contain arbitrary custom attributes stored in a +:custom+ sub-hash: - # - # @example A user hash with custom attributes - # {key: "user@example.com", custom: {customer_rank: 1000, groups: ["google", "microsoft"]}} - # - # Attribute values in the custom hash can be integers, booleans, strings, or - # lists of integers, booleans, or strings. + # @example User with a private attribute + # {key: "my-user-id", email: "email@example.com", privateAttributeNames: ["email"]} # # @param key [String] the unique feature key for the feature flag, as shown # on the LaunchDarkly dashboard # @param user [Hash] a hash containing parameters for the end user requesting the flag - # @param default the default value of the flag + # @param default the default value of the flag; this is used if there is an error + # condition making it impossible to find or evaluate the flag + # + # @return the variation to show the user, or the default value if there's an error # - # @return the variation to show the user, or the - # default value if there's an an error def variation(key, user, default) evaluate_internal(key, user, default, false).value end # - # Determines the variation of a feature flag for a user, like `variation`, but also + # Determines the variation of a feature flag for a user, like {#variation}, but also + # provides additional information about how this value was calculated. # - # The return value of `variation_detail` is an `EvaluationDetail` object, which has - # three properties: - # - # `value`: the value that was calculated for this user (same as the return value - # of `variation`) + # The return value of `variation_detail` is an {EvaluationDetail} object, which has + # three properties: the result value, the positional index of this value in the flag's + # list of variations, and an object describing the main reason why this value was + # selected. See {EvaluationDetail} for more on these properties. # - # `variation_index`: the positional index of this value in the flag, e.g.
0 for the - # first variation - or `nil` if the default value was returned + # Calling `variation_detail` instead of `variation` also causes the "reason" data to + # be included in analytics events, if you are capturing detailed event data for this flag. # - # `reason`: a hash describing the main reason why this value was selected. Its `:kind` - # property will be one of the following: - # - # * `'OFF'`: the flag was off and therefore returned its configured off value - # * `'FALLTHROUGH'`: the flag was on but the user did not match any targets or rules - # * `'TARGET_MATCH'`: the user key was specifically targeted for this flag - # * `'RULE_MATCH'`: the user matched one of the flag's rules; the `:ruleIndex` and - # `:ruleId` properties indicate the positional index and unique identifier of the rule - # * `'PREREQUISITE_FAILED`': the flag was considered off because it had at least one - # prerequisite flag that either was off or did not return the desired variation; the - # `:prerequisiteKey` property indicates the key of the prerequisite that failed - # * `'ERROR'`: the flag could not be evaluated, e.g. because it does not exist or due - # to an unexpected error, and therefore returned the default value; the `:errorKind` - # property describes the nature of the error, such as `'FLAG_NOT_FOUND'` - # - # The `reason` will also be included in analytics events, if you are capturing - # detailed event data for this flag. + # For more information, see the reference guide on + # [Evaluation reasons](https://docs.launchdarkly.com/v2.0/docs/evaluation-reasons). # # @param key [String] the unique feature key for the feature flag, as shown # on the LaunchDarkly dashboard # @param user [Hash] a hash containing parameters for the end user requesting the flag - # @param default the default value of the flag + # @param default the default value of the flag; this is used if there is an error + # condition making it impossible to find or evaluate the flag # - # @return an `EvaluationDetail` object describing the result + # @return [EvaluationDetail] an object describing the result # def variation_detail(key, user, default) evaluate_internal(key, user, default, true) end # - # Registers the user + # Registers the user. This method simply creates an analytics event containing the user + # properties, so that LaunchDarkly will know about that user if it does not already. # - # @param [Hash] The user to register + # Calling {#variation} or {#variation_detail} also sends the user information to + # LaunchDarkly (if events are enabled), so you only need to use {#identify} if you + # want to identify the user without evaluating a flag. # + # Note that event delivery is asynchronous, so the event may not actually be sent + # until later; see {#flush}. + # + # @param user [Hash] The user to register; this can have all the same user properties + # described in {#variation} # @return [void] + # def identify(user) sanitize_user(user) @event_processor.add_event(kind: "identify", key: user[:key], user: user) end # - # Tracks that a user performed an event + # Tracks that a user performed an event. This method creates a "custom" analytics event + # containing the specified event name (key), user properties, and optional data. + # + # Note that event delivery is asynchronous, so the event may not actually be sent + # until later; see {#flush}. # # @param event_name [String] The name of the event - # @param user [Hash] The user that performed the event. 
This should be the same user hash used in calls to {#toggle?} + # @param user [Hash] The user to register; this can have all the same user properties + # described in {#variation} # @param data [Hash] A hash containing any additional data associated with the event - # # @return [void] + # def track(event_name, user, data) sanitize_user(user) @event_processor.add_event(kind: "custom", key: event_name, user: user, data: data) end # - # Returns all feature flag values for the given user. This method is deprecated - please use - # {#all_flags_state} instead. Current versions of the client-side SDK will not generate analytics - # events correctly if you pass the result of all_flags. + # Returns all feature flag values for the given user. + # + # @deprecated Please use {#all_flags_state} instead. Current versions of the + # client-side SDK will not generate analytics events correctly if you pass the + # result of `all_flags`. # # @param user [Hash] The end user requesting the feature flags # @return [Hash] a hash of feature flag keys to values @@ -191,21 +248,21 @@ def all_flags(user) end # - # Returns a FeatureFlagsState object that encapsulates the state of all feature flags for a given user, + # Returns a {FeatureFlagsState} object that encapsulates the state of all feature flags for a given user, # including the flag values and also metadata that can be used on the front end. This method does not # send analytics events back to LaunchDarkly. # # @param user [Hash] The end user requesting the feature flags - # @param options={} [Hash] Optional parameters to control how the state is generated + # @param options [Hash] Optional parameters to control how the state is generated # @option options [Boolean] :client_side_only (false) True if only flags marked for use with the # client-side SDK should be included in the state. By default, all flags are included. # @option options [Boolean] :with_reasons (false) True if evaluation reasons should be included - # in the state (see `variation_detail`). By default, they are not included. + # in the state (see {#variation_detail}). By default, they are not included. # @option options [Boolean] :details_only_for_tracked_flags (false) True if any flag metadata that is - # normally only used for event generation - such as flag versions and evaluation reasons - should be - # omitted for any flag that does not have event tracking or debugging turned on. This reduces the size - # of the JSON data if you are passing the flag state to the front end. - # @return [FeatureFlagsState] a FeatureFlagsState object which can be serialized to JSON + # normally only used for event generation - such as flag versions and evaluation reasons - should be + # omitted for any flag that does not have event tracking or debugging turned on. This reduces the size + # of the JSON data if you are passing the flag state to the front end. + # @return [FeatureFlagsState] a {FeatureFlagsState} object which can be serialized to JSON # def all_flags_state(user, options={}) return FeatureFlagsState.new(false) if @config.offline? @@ -246,19 +303,19 @@ def all_flags_state(user, options={}) end # - # Releases all network connections and other resources held by the client, making it no longer usable + # Releases all network connections and other resources held by the client, making it no longer usable. # # @return [void] def close @config.logger.info { "[LDClient] Closing LaunchDarkly client..." 
} - @update_processor.stop + @data_source.stop @event_processor.stop @store.stop end private - def create_default_update_processor(sdk_key, config) + def create_default_data_source(sdk_key, config) if config.offline? return NullUpdateProcessor.new end @@ -351,6 +408,7 @@ def make_feature_event(flag, user, detail, default, with_reasons) # # Used internally when the client is offline. + # @private # class NullUpdateProcessor def start diff --git a/lib/ldclient-rb/memoized_value.rb b/lib/ldclient-rb/memoized_value.rb index 3ba766a6..ddddb7e0 100644 --- a/lib/ldclient-rb/memoized_value.rb +++ b/lib/ldclient-rb/memoized_value.rb @@ -2,6 +2,8 @@ module LaunchDarkly # Simple implementation of a thread-safe memoized value whose generator function will never be # run more than once, and whose value can be overridden by explicit assignment. + # Note that we no longer use this class and it will be removed in a future version. + # @private class MemoizedValue def initialize(&generator) @generator = generator diff --git a/lib/ldclient-rb/newrelic.rb b/lib/ldclient-rb/newrelic.rb index ed6eb4e4..5c9b7d48 100644 --- a/lib/ldclient-rb/newrelic.rb +++ b/lib/ldclient-rb/newrelic.rb @@ -1,4 +1,5 @@ module LaunchDarkly + # @private class LDNewRelic begin require "newrelic_rpm" diff --git a/lib/ldclient-rb/non_blocking_thread_pool.rb b/lib/ldclient-rb/non_blocking_thread_pool.rb index 81b7ea14..28ec42a9 100644 --- a/lib/ldclient-rb/non_blocking_thread_pool.rb +++ b/lib/ldclient-rb/non_blocking_thread_pool.rb @@ -3,10 +3,10 @@ require "concurrent/executors" require "thread" -# Simple wrapper for a FixedThreadPool that rejects new jobs if all the threads are busy, rather -# than blocking. Also provides a way to wait for all jobs to finish without shutting down. - module LaunchDarkly + # Simple wrapper for a FixedThreadPool that rejects new jobs if all the threads are busy, rather + # than blocking. Also provides a way to wait for all jobs to finish without shutting down. + # @private class NonBlockingThreadPool def initialize(capacity) @capacity = capacity diff --git a/lib/ldclient-rb/polling.rb b/lib/ldclient-rb/polling.rb index 4ecd93f8..4c6769f3 100644 --- a/lib/ldclient-rb/polling.rb +++ b/lib/ldclient-rb/polling.rb @@ -2,6 +2,7 @@ require "thread" module LaunchDarkly + # @private class PollingProcessor def initialize(config, requestor) @config = config diff --git a/lib/ldclient-rb/redis_store.rb b/lib/ldclient-rb/redis_store.rb index c9b1bc64..392f5d2e 100644 --- a/lib/ldclient-rb/redis_store.rb +++ b/lib/ldclient-rb/redis_store.rb @@ -1,5 +1,5 @@ -require "concurrent/atomics" -require "json" +require "ldclient-rb/interfaces" +require "ldclient-rb/impl/integrations/redis_impl" module LaunchDarkly # @@ -12,14 +12,16 @@ module LaunchDarkly # installed. Then, create an instance and store it in the `feature_store` property # of your client configuration. # + # @deprecated Use the factory method in {LaunchDarkly::Integrations::Redis} instead. This specific + # implementation class may be changed or removed in the future. + # class RedisFeatureStore - begin - require "redis" - require "connection_pool" - REDIS_ENABLED = true - rescue ScriptError, StandardError - REDIS_ENABLED = false - end + include LaunchDarkly::Interfaces::FeatureStore + + # Note that this class is now just a facade around CachingStoreWrapper, which is in turn delegating + # to RedisFeatureStoreCore where the actual database logic is. This class was retained for historical + # reasons, so that existing code can still call RedisFeatureStore.new. 
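A hedged illustration of the compatibility path this comment describes; both calls now produce equivalent stores, since the old class simply wraps the new machinery (the prefix value is hypothetical):

```ruby
# Legacy construction, still supported for existing code:
store = LaunchDarkly::RedisFeatureStore.new(prefix: "my-app")

# Equivalent factory-method construction:
store = LaunchDarkly::Integrations::Redis.new_feature_store(prefix: "my-app")
```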
In the future, we will migrate + # away from exposing these concrete classes and use factory methods instead. # # Constructor for a RedisFeatureStore instance. @@ -30,45 +32,13 @@ class RedisFeatureStore # @option opts [String] :prefix namespace prefix to add to all hash keys used by LaunchDarkly # @option opts [Logger] :logger a `Logger` instance; defaults to `Config.default_logger` # @option opts [Integer] :max_connections size of the Redis connection pool - # @option opts [Integer] :expiration expiration time for the in-memory cache, in seconds; 0 for no local caching + # @option opts [Integer] :expiration_seconds expiration time for the in-memory cache, in seconds; 0 for no local caching # @option opts [Integer] :capacity maximum number of feature flags (or related objects) to cache locally - # @option opts [Object] :pool custom connection pool, used for testing only + # @option opts [Object] :pool custom connection pool, if desired # def initialize(opts = {}) - if !REDIS_ENABLED - raise RuntimeError.new("can't use RedisFeatureStore because one of these gems is missing: redis, connection_pool") - end - @redis_opts = opts[:redis_opts] || Hash.new - if opts[:redis_url] - @redis_opts[:url] = opts[:redis_url] - end - if !@redis_opts.include?(:url) - @redis_opts[:url] = RedisFeatureStore.default_redis_url - end - max_connections = opts[:max_connections] || 16 - @pool = opts[:pool] || ConnectionPool.new(size: max_connections) do - Redis.new(@redis_opts) - end - @prefix = opts[:prefix] || RedisFeatureStore.default_prefix - @logger = opts[:logger] || Config.default_logger - - expiration_seconds = opts[:expiration] || 15 - capacity = opts[:capacity] || 1000 - if expiration_seconds > 0 - @cache = ExpiringCache.new(capacity, expiration_seconds) - else - @cache = nil - end - - @stopped = Concurrent::AtomicBoolean.new(false) - @inited = MemoizedValue.new { - query_inited - } - - with_connection do |redis| - @logger.info("RedisFeatureStore: using Redis instance at #{redis.connection[:host]}:#{redis.connection[:port]} \ -and prefix: #{@prefix}") - end + core = LaunchDarkly::Impl::Integrations::Redis::RedisFeatureStoreCore.new(opts) + @wrapper = LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts) end # @@ -76,178 +46,42 @@ def initialize(opts = {}) # running at `localhost` with its default port. # def self.default_redis_url - 'redis://localhost:6379/0' + LaunchDarkly::Integrations::Redis::default_redis_url end # # Default value for the `prefix` constructor parameter. # def self.default_prefix - 'launchdarkly' + LaunchDarkly::Integrations::Redis::default_prefix end def get(kind, key) - f = @cache.nil? ? nil : @cache[cache_key(kind, key)] - if f.nil? - @logger.debug { "RedisFeatureStore: no cache hit for #{key} in '#{kind[:namespace]}', requesting from Redis" } - f = with_connection do |redis| - begin - get_redis(kind, redis, key.to_sym) - rescue => e - @logger.error { "RedisFeatureStore: could not retrieve #{key} from Redis in '#{kind[:namespace]}', with error: #{e}" } - nil - end - end - end - if f.nil? 
- @logger.debug { "RedisFeatureStore: #{key} not found in '#{kind[:namespace]}'" } - nil - elsif f[:deleted] - @logger.debug { "RedisFeatureStore: #{key} was deleted in '#{kind[:namespace]}', returning nil" } - nil - else - f - end + @wrapper.get(kind, key) end def all(kind) - fs = {} - with_connection do |redis| - begin - hashfs = redis.hgetall(items_key(kind)) - rescue => e - @logger.error { "RedisFeatureStore: could not retrieve all '#{kind[:namespace]}' items from Redis with error: #{e}; returning none" } - hashfs = {} - end - hashfs.each do |k, jsonItem| - f = JSON.parse(jsonItem, symbolize_names: true) - if !f[:deleted] - fs[k.to_sym] = f - end - end - end - fs + @wrapper.all(kind) end def delete(kind, key, version) - update_with_versioning(kind, { key: key, version: version, deleted: true }) + @wrapper.delete(kind, key, version) end def init(all_data) - @cache.clear if !@cache.nil? - count = 0 - with_connection do |redis| - all_data.each do |kind, items| - begin - redis.multi do |multi| - multi.del(items_key(kind)) - count = count + items.count - items.each { |key, item| - redis.hset(items_key(kind), key, item.to_json) - } - end - items.each { |key, item| - put_cache(kind, key.to_sym, item) - } - rescue => e - @logger.error { "RedisFeatureStore: could not initialize '#{kind[:namespace]}' in Redis, error: #{e}" } - end - end - end - @inited.set(true) - @logger.info { "RedisFeatureStore: initialized with #{count} items" } + @wrapper.init(all_data) end def upsert(kind, item) - update_with_versioning(kind, item) + @wrapper.upsert(kind, item) end def initialized? - @inited.get + @wrapper.initialized? end def stop - if @stopped.make_true - @pool.shutdown { |redis| redis.close } - @cache.clear if !@cache.nil? - end - end - - private - - # exposed for testing - def before_update_transaction(base_key, key) - end - - def items_key(kind) - @prefix + ":" + kind[:namespace] - end - - def cache_key(kind, key) - kind[:namespace] + ":" + key.to_s - end - - def with_connection - @pool.with { |redis| yield(redis) } - end - - def get_redis(kind, redis, key) - begin - json_item = redis.hget(items_key(kind), key) - if json_item - item = JSON.parse(json_item, symbolize_names: true) - put_cache(kind, key, item) - item - else - nil - end - rescue => e - @logger.error { "RedisFeatureStore: could not retrieve #{key} from Redis, error: #{e}" } - nil - end - end - - def put_cache(kind, key, value) - @cache[cache_key(kind, key)] = value if !@cache.nil? - end - - def update_with_versioning(kind, new_item) - base_key = items_key(kind) - key = new_item[:key] - try_again = true - while try_again - try_again = false - with_connection do |redis| - redis.watch(base_key) do - old_item = get_redis(kind, redis, key) - before_update_transaction(base_key, key) - if old_item.nil? || old_item[:version] < new_item[:version] - begin - result = redis.multi do |multi| - multi.hset(base_key, key, new_item.to_json) - end - if result.nil? - @logger.debug { "RedisFeatureStore: concurrent modification detected, retrying" } - try_again = true - else - put_cache(kind, key.to_sym, new_item) - end - rescue => e - @logger.error { "RedisFeatureStore: could not store #{key} in Redis, error: #{e}" } - end - else - action = new_item[:deleted] ? 
"delete" : "update" - @logger.warn { "RedisFeatureStore: attempted to #{action} #{key} version: #{old_item[:version]} \ - in '#{kind[:namespace]}' with a version that is the same or older: #{new_item[:version]}" } - end - redis.unwatch - end - end - end - end - - def query_inited - with_connection { |redis| redis.exists(items_key(FEATURES)) } + @wrapper.stop end end end diff --git a/lib/ldclient-rb/requestor.rb b/lib/ldclient-rb/requestor.rb index 25cce121..8922e82c 100644 --- a/lib/ldclient-rb/requestor.rb +++ b/lib/ldclient-rb/requestor.rb @@ -3,7 +3,7 @@ require "faraday/http_cache" module LaunchDarkly - + # @private class UnexpectedResponseError < StandardError def initialize(status) @status = status @@ -14,12 +14,13 @@ def status end end + # @private class Requestor def initialize(sdk_key, config) @sdk_key = sdk_key @config = config @client = Faraday.new do |builder| - builder.use :http_cache, store: @config.cache_store + builder.use :http_cache, store: @config.cache_store, serializer: Marshal builder.adapter :net_http_persistent end diff --git a/lib/ldclient-rb/simple_lru_cache.rb b/lib/ldclient-rb/simple_lru_cache.rb index 64b1a709..4eda4e27 100644 --- a/lib/ldclient-rb/simple_lru_cache.rb +++ b/lib/ldclient-rb/simple_lru_cache.rb @@ -2,6 +2,7 @@ module LaunchDarkly # A non-thread-safe implementation of a LRU cache set with only add and reset methods. # Based on https://github.com/SamSaffron/lru_redux/blob/master/lib/lru_redux/cache.rb + # @private class SimpleLRUCacheSet def initialize(capacity) @values = {} diff --git a/lib/ldclient-rb/stream.rb b/lib/ldclient-rb/stream.rb index 2151e945..094a37b2 100644 --- a/lib/ldclient-rb/stream.rb +++ b/lib/ldclient-rb/stream.rb @@ -1,20 +1,28 @@ require "concurrent/atomics" require "json" -require "sse_client" +require "ld-eventsource" module LaunchDarkly + # @private PUT = :put + # @private PATCH = :patch + # @private DELETE = :delete + # @private INDIRECT_PUT = :'indirect/put' + # @private INDIRECT_PATCH = :'indirect/patch' + # @private READ_TIMEOUT_SECONDS = 300 # 5 minutes; the stream should send a ping every 3 minutes + # @private KEY_PATHS = { FEATURES => "/flags/", SEGMENTS => "/segments/" } + # @private class StreamProcessor def initialize(sdk_key, config, requestor) @sdk_key = sdk_key @@ -46,15 +54,18 @@ def start read_timeout: READ_TIMEOUT_SECONDS, logger: @config.logger } - @es = SSE::SSEClient.new(@config.stream_uri + "/all", opts) do |conn| - conn.on_event { |event| process_message(event, event.type) } + @es = SSE::Client.new(@config.stream_uri + "/all", **opts) do |conn| + conn.on_event { |event| process_message(event) } conn.on_error { |err| - status = err[:status_code] - message = Util.http_error_message(status, "streaming connection", "will retry") - @config.logger.error { "[LDClient] #{message}" } - if !Util.http_error_recoverable?(status) - @ready.set # if client was waiting on us, make it stop waiting - has no effect if already set - stop + case err + when SSE::Errors::HTTPStatusError + status = err.status + message = Util.http_error_message(status, "streaming connection", "will retry") + @config.logger.error { "[LDClient] #{message}" } + if !Util.http_error_recoverable?(status) + @ready.set # if client was waiting on us, make it stop waiting - has no effect if already set + stop + end end } end @@ -71,7 +82,8 @@ def stop private - def process_message(message, method) + def process_message(message) + method = message.type @config.logger.debug { "[LDClient] Stream received #{method} message: #{message.data}" } if method == 
PUT message = JSON.parse(message.data, symbolize_names: true) diff --git a/lib/ldclient-rb/user_filter.rb b/lib/ldclient-rb/user_filter.rb index 449d8d2e..8cbf67ca 100644 --- a/lib/ldclient-rb/user_filter.rb +++ b/lib/ldclient-rb/user_filter.rb @@ -2,6 +2,7 @@ require "set" module LaunchDarkly + # @private class UserFilter def initialize(config) @all_attributes_private = config.all_attributes_private diff --git a/lib/ldclient-rb/util.rb b/lib/ldclient-rb/util.rb index 707ba3ce..e303e18a 100644 --- a/lib/ldclient-rb/util.rb +++ b/lib/ldclient-rb/util.rb @@ -1,5 +1,6 @@ module LaunchDarkly + # @private module Util def self.log_exception(logger, message, exc) logger.error { "[LDClient] #{message}: #{exc.inspect}" } diff --git a/lib/sse_client.rb b/lib/sse_client.rb deleted file mode 100644 index dd24c3a6..00000000 --- a/lib/sse_client.rb +++ /dev/null @@ -1,4 +0,0 @@ -require "sse_client/streaming_http" -require "sse_client/sse_events" -require "sse_client/backoff" -require "sse_client/sse_client" diff --git a/lib/sse_client/backoff.rb b/lib/sse_client/backoff.rb deleted file mode 100644 index 73e0754f..00000000 --- a/lib/sse_client/backoff.rb +++ /dev/null @@ -1,38 +0,0 @@ - -module SSE - # - # A simple backoff algorithm that can be reset at any time, or reset itself after a given - # interval has passed without errors. - # - class Backoff - def initialize(base_interval, max_interval, auto_reset_interval = 60) - @base_interval = base_interval - @max_interval = max_interval - @auto_reset_interval = auto_reset_interval - @attempts = 0 - @last_good_time = nil - @jitter_rand = Random.new - end - - attr_accessor :base_interval - - def next_interval - if !@last_good_time.nil? && (Time.now.to_i - @last_good_time) >= @auto_reset_interval - @attempts = 0 - end - @last_good_time = nil - if @attempts == 0 - @attempts += 1 - return 0 - end - @last_good_time = nil - target = ([@base_interval * (2 ** @attempts), @max_interval].min).to_f - @attempts += 1 - (target / 2) + @jitter_rand.rand(target / 2) - end - - def mark_success - @last_good_time = Time.now.to_i if @last_good_time.nil? - end - end -end diff --git a/lib/sse_client/sse_client.rb b/lib/sse_client/sse_client.rb deleted file mode 100644 index 9f285360..00000000 --- a/lib/sse_client/sse_client.rb +++ /dev/null @@ -1,171 +0,0 @@ -require "concurrent/atomics" -require "logger" -require "thread" -require "uri" - -module SSE - # - # A lightweight Server-Sent Events implementation, relying on two gems: socketry for sockets with - # read timeouts, and http_tools for HTTP response parsing. The overall logic is based on - # [https://github.com/Tonkpils/celluloid-eventsource]. - # - class SSEClient - DEFAULT_CONNECT_TIMEOUT = 10 - DEFAULT_READ_TIMEOUT = 300 - DEFAULT_RECONNECT_TIME = 1 - MAX_RECONNECT_TIME = 30 - - def initialize(uri, options = {}) - @uri = URI(uri) - @stopped = Concurrent::AtomicBoolean.new(false) - - @headers = options[:headers] ? options[:headers].clone : {} - @connect_timeout = options[:connect_timeout] || DEFAULT_CONNECT_TIMEOUT - @read_timeout = options[:read_timeout] || DEFAULT_READ_TIMEOUT - @logger = options[:logger] || default_logger - - if options[:proxy] - @proxy = options[:proxy] - else - proxyUri = @uri.find_proxy - if !proxyUri.nil? 
&& (proxyUri.scheme == 'http' || proxyUri.scheme == 'https')
-          @proxy = proxyUri
-        end
-      end
-
-      reconnect_time = options[:reconnect_time] || DEFAULT_RECONNECT_TIME
-      @backoff = Backoff.new(reconnect_time, MAX_RECONNECT_TIME)
-
-      @on = { event: ->(_) {}, error: ->(_) {} }
-      @last_id = nil
-
-      yield self if block_given?
-
-      Thread.new do
-        run_stream
-      end
-    end
-
-    def on(event_name, &action)
-      @on[event_name.to_sym] = action
-    end
-
-    def on_event(&action)
-      @on[:event] = action
-    end
-
-    def on_error(&action)
-      @on[:error] = action
-    end
-
-    def close
-      if @stopped.make_true
-        @cxn.close if !@cxn.nil?
-        @cxn = nil
-      end
-    end
-
-    private
-
-    def default_logger
-      log = ::Logger.new($stdout)
-      log.level = ::Logger::WARN
-      log
-    end
-
-    def run_stream
-      while !@stopped.value
-        @cxn = nil
-        begin
-          @cxn = connect
-          # There's a potential race if close was called in the middle of the previous line, i.e. after we
-          # connected but before @cxn was set. Checking the variable again is a bit clunky but avoids that.
-          return if @stopped.value
-          read_stream(@cxn) if !@cxn.nil?
-        rescue Errno::EBADF
-          # don't log this - it probably means we closed our own connection deliberately
-        rescue StandardError => e
-          @logger.error { "Unexpected error from event source: #{e.inspect}" }
-          @logger.debug { "Exception trace: #{e.backtrace}" }
-        end
-        begin
-          @cxn.close if !@cxn.nil?
-        rescue StandardError => e
-          @logger.error { "Unexpected error while closing stream: #{e.inspect}" }
-          @logger.debug { "Exception trace: #{e.backtrace}" }
-        end
-      end
-    end
-
-    # Try to establish a streaming connection. Returns the StreamingHTTPConnection object if successful.
-    def connect
-      loop do
-        return if @stopped.value
-        interval = @backoff.next_interval
-        if interval > 0
-          @logger.warn { "Will retry connection after #{'%.3f' % interval} seconds" }
-          sleep(interval)
-        end
-        begin
-          cxn = open_connection(build_headers)
-          if cxn.status != 200
-            body = cxn.read_all  # grab the whole response body in case it has error details
-            cxn.close
-            @on[:error].call({status_code: cxn.status, body: body})
-            next
-          elsif cxn.headers["content-type"] && cxn.headers["content-type"].start_with?("text/event-stream")
-            return cxn  # we're good to proceed
-          end
-          @logger.error { "Event source returned unexpected content type '#{cxn.headers["content-type"]}'" }
-        rescue Errno::EBADF
-          raise
-        rescue StandardError => e
-          @logger.error { "Unexpected error from event source: #{e.inspect}" }
-          @logger.debug { "Exception trace: #{e.backtrace}" }
-          cxn.close if !cxn.nil?
-        end
-        # if unsuccessful, continue the loop to connect again
-      end
-    end
-
-    # Just calls the StreamingHTTPConnection constructor - factored out for test purposes
-    def open_connection(headers)
-      StreamingHTTPConnection.new(@uri, @proxy, headers, @connect_timeout, @read_timeout)
-    end
-
-    # Pipe the output of the StreamingHTTPConnection into the EventParser, and dispatch events as
-    # they arrive.
-    def read_stream(cxn)
-      event_parser = EventParser.new(cxn.read_lines)
-      event_parser.items.each do |item|
-        return if @stopped.value
-        case item
-          when SSEEvent
-            dispatch_event(item)
-          when SSESetRetryInterval
-            @backoff.base_interval = item.milliseconds.to_f / 1000
-        end
-      end
-    end
-
-    def dispatch_event(event)
-      @last_id = event.id
-
-      # Tell the Backoff object that as of the current time, we have succeeded in getting some data. It
-      # uses that information so it can automatically reset itself if enough time passes between failures.
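# [Editor's aside — a minimal sketch, not part of the original file: how the Backoff class
# from backoff.rb above behaves when driven by this loop. The 60-second figure is Backoff's
# default auto_reset_interval; the variable names here are hypothetical.]
backoff = SSE::Backoff.new(1, 30)  # base interval 1s, capped at 30s
backoff.next_interval              # => 0 on the first attempt, so we connect immediately
backoff.mark_success               # records the time at which data first arrived
# A failure within 60s of that success produces exponentially growing, jittered delays;
# once 60s have elapsed since the success, the sequence resets and next_interval is 0 again.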
-      @backoff.mark_success
-
-      # Pass the event to the caller
-      @on[:event].call(event)
-    end
-
-    def build_headers
-      h = {
-        'Accept' => 'text/event-stream',
-        'Cache-Control' => 'no-cache'
-      }
-      h['Last-Event-Id'] = @last_id if !@last_id.nil?
-      h.merge(@headers)
-    end
-  end
-end
diff --git a/lib/sse_client/sse_events.rb b/lib/sse_client/sse_events.rb
deleted file mode 100644
index 762cc2b0..00000000
--- a/lib/sse_client/sse_events.rb
+++ /dev/null
@@ -1,67 +0,0 @@
-
-module SSE
-  # Server-Sent Event type used by SSEClient and EventParser.
-  SSEEvent = Struct.new(:type, :data, :id)
-
-  SSESetRetryInterval = Struct.new(:milliseconds)
-
-  #
-  # Accepts lines of text via an iterator, and parses them into SSE messages.
-  #
-  class EventParser
-    def initialize(lines)
-      @lines = lines
-      reset_buffers
-    end
-
-    # Generator that parses the input iterator and returns instances of SSEEvent or SSESetRetryInterval.
-    def items
-      Enumerator.new do |gen|
-        @lines.each do |line|
-          line.chomp!
-          if line.empty?
-            event = maybe_create_event
-            reset_buffers
-            gen.yield event if !event.nil?
-          else
-            case line
-              when /^(\w+): ?(.*)$/
-                item = process_field($1, $2)
-                gen.yield item if !item.nil?
-            end
-          end
-        end
-      end
-    end
-
-    private
-
-    def reset_buffers
-      @id = nil
-      @type = nil
-      @data = ""
-    end
-
-    def process_field(name, value)
-      case name
-        when "event"
-          @type = value.to_sym
-        when "data"
-          @data << "\n" if !@data.empty?
-          @data << value
-        when "id"
-          @id = value
-        when "retry"
-          if /^(?<num>\d+)$/ =~ value
-            return SSESetRetryInterval.new(num.to_i)
-          end
-      end
-      nil
-    end
-
-    def maybe_create_event
-      return nil if @data.empty?
-      SSEEvent.new(@type || :message, @data, @id)
-    end
-  end
-end
diff --git a/lib/sse_client/streaming_http.rb b/lib/sse_client/streaming_http.rb
deleted file mode 100644
index eeb80e82..00000000
--- a/lib/sse_client/streaming_http.rb
+++ /dev/null
@@ -1,199 +0,0 @@
-require "concurrent/atomics"
-require "http_tools"
-require "socketry"
-
-module SSE
-  #
-  # Wrapper around a socket providing a simplified HTTP request-response cycle including streaming.
-  # The socket is created and managed by Socketry, which we use so that we can have a read timeout.
-  #
-  class StreamingHTTPConnection
-    attr_reader :status, :headers
-
-    def initialize(uri, proxy, headers, connect_timeout, read_timeout)
-      @socket = HTTPConnectionFactory.connect(uri, proxy, connect_timeout, read_timeout)
-      @socket.write(build_request(uri, headers))
-      @reader = HTTPResponseReader.new(@socket, read_timeout)
-      @status = @reader.status
-      @headers = @reader.headers
-      @closed = Concurrent::AtomicBoolean.new(false)
-    end
-
-    def close
-      if @closed.make_true
-        @socket.close if @socket
-        @socket = nil
-      end
-    end
-
-    # Generator that returns one line of the response body at a time (delimited by \r, \n,
-    # or \r\n) until the response is fully consumed or the socket is closed.
-    def read_lines
-      @reader.read_lines
-    end
-
-    # Consumes the entire response body and returns it.
-    def read_all
-      @reader.read_all
-    end
-
-    private
-
-    # Build an HTTP request line and headers.
-    def build_request(uri, headers)
-      ret = "GET #{uri.request_uri} HTTP/1.1\r\n"
-      ret << "Host: #{uri.host}\r\n"
-      headers.each { |k, v|
-        ret << "#{k}: #{v}\r\n"
-      }
-      ret + "\r\n"
-    end
-  end
-
-  #
-  # Used internally to send the HTTP request, including the proxy dialogue if necessary.
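# [Editor's aside — a minimal sketch, not part of the original file: the contract of the
# EventParser deleted in sse_events.rb above. It accepts any enumerable of text lines and
# lazily yields parsed items; the sample lines here are hypothetical.]
lines = ["event: put\r\n", "data: {\"flags\":{}}\r\n", "id: 1\r\n", "\r\n"]
parser = SSE::EventParser.new(lines)
parser.items.to_a  # => [#<struct SSE::SSEEvent type=:put, data="{\"flags\":{}}", id="1">]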
- # - class HTTPConnectionFactory - def self.connect(uri, proxy, connect_timeout, read_timeout) - if !proxy - return open_socket(uri, connect_timeout) - end - - socket = open_socket(proxy, connect_timeout) - socket.write(build_proxy_request(uri, proxy)) - - # temporarily create a reader just for the proxy connect response - proxy_reader = HTTPResponseReader.new(socket, read_timeout) - if proxy_reader.status != 200 - raise ProxyError, "proxy connection refused, status #{proxy_reader.status}" - end - - # start using TLS at this point if appropriate - if uri.scheme.downcase == 'https' - wrap_socket_in_ssl_socket(socket) - else - socket - end - end - - private - - def self.open_socket(uri, connect_timeout) - if uri.scheme.downcase == 'https' - Socketry::SSL::Socket.connect(uri.host, uri.port, timeout: connect_timeout) - else - Socketry::TCP::Socket.connect(uri.host, uri.port, timeout: connect_timeout) - end - end - - # Build a proxy connection header. - def self.build_proxy_request(uri, proxy) - ret = "CONNECT #{uri.host}:#{uri.port} HTTP/1.1\r\n" - ret << "Host: #{uri.host}:#{uri.port}\r\n" - if proxy.user || proxy.password - encoded_credentials = Base64.strict_encode64([proxy.user || '', proxy.password || ''].join(":")) - ret << "Proxy-Authorization: Basic #{encoded_credentials}\r\n" - end - ret << "\r\n" - ret - end - - def self.wrap_socket_in_ssl_socket(socket) - io = IO.try_convert(socket) - ssl_sock = OpenSSL::SSL::SSLSocket.new(io, OpenSSL::SSL::SSLContext.new) - ssl_sock.connect - Socketry::SSL::Socket.new.from_socket(ssl_sock) - end - end - - class ProxyError < StandardError - def initialize(message) - super - end - end - - # - # Used internally to read the HTTP response, either all at once or as a stream of text lines. - # Incoming data is fed into an instance of HTTPTools::Parser, which gives us the header and - # chunks of the body via callbacks. - # - class HTTPResponseReader - DEFAULT_CHUNK_SIZE = 10000 - - attr_reader :status, :headers - - def initialize(socket, read_timeout) - @socket = socket - @read_timeout = read_timeout - @parser = HTTPTools::Parser.new - @buffer = "" - @done = false - @lock = Mutex.new - - # Provide callbacks for the Parser to give us the headers and body. This has to be done - # before we start piping any data into the parser. - have_headers = false - @parser.on(:header) do - have_headers = true - end - @parser.on(:stream) do |data| - @lock.synchronize { @buffer << data } # synchronize because we're called from another thread in Socketry - end - @parser.on(:finish) do - @lock.synchronize { @done = true } - end - - # Block until the status code and headers have been successfully read. - while !have_headers - raise EOFError if !read_chunk_into_buffer - end - @headers = Hash[@parser.header.map { |k,v| [k.downcase, v] }] - @status = @parser.status_code - end - - def read_lines - Enumerator.new do |gen| - loop do - line = read_line - break if line.nil? - gen.yield line - end - end - end - - def read_all - while read_chunk_into_buffer - end - @buffer - end - - private - - # Attempt to read some more data from the socket. Return true if successful, false if EOF. - # A read timeout will result in an exception from Socketry's readpartial method. 
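# [Editor's aside — a minimal sketch, not part of the original file: HTTPResponseReader can
# be exercised without a network using a fake socket, the same technique the deleted
# streaming_http_spec.rb below uses. FakeSocket and the sample response are hypothetical;
# readpartial mirrors the Socketry call signature used above.]
class FakeSocket
  def initialize(chunks)
    @chunks = chunks.each
  end

  def readpartial(size, timeout: nil)
    @chunks.next        # feed the response in small pieces
  rescue StopIteration
    :eof                # same end-of-input convention the reader checks for
  end
end

response = "HTTP/1.1 200 OK\r\nContent-Type: text/event-stream\r\n\r\ndata: hi\n\n"
reader = SSE::HTTPResponseReader.new(FakeSocket.new(response.scan(/.{1,5}/m)), 0)
reader.status           # => 200
reader.read_lines.to_a  # => ["data: hi\n", "\n"]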
- def read_chunk_into_buffer - # If @done is set, it means the Parser has signaled end of response body - @lock.synchronize { return false if @done } - data = @socket.readpartial(DEFAULT_CHUNK_SIZE, timeout: @read_timeout) - return false if data == :eof - @parser << data - # We are piping the content through the parser so that it can handle things like chunked - # encoding for us. The content ends up being appended to @buffer via our callback. - true - end - - # Extract the next line of text from the read buffer, refilling the buffer as needed. - def read_line - loop do - @lock.synchronize do - i = @buffer.index(/[\r\n]/) - if !i.nil? - i += 1 if (@buffer[i] == "\r" && i < @buffer.length - 1 && @buffer[i + 1] == "\n") - return @buffer.slice!(0, i + 1).force_encoding(Encoding::UTF_8) - end - end - return nil if !read_chunk_into_buffer - end - end - end -end diff --git a/scripts/gendocs.sh b/scripts/gendocs.sh new file mode 100755 index 00000000..1e545955 --- /dev/null +++ b/scripts/gendocs.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# Use this script to generate documentation locally in ./doc so it can be proofed before release. +# After release, documentation will be visible at https://www.rubydoc.info/gems/ldclient-rb + +gem install --conservative yard +gem install --conservative redcarpet # provides Markdown formatting + +# yard doesn't seem to do recursive directories, even though Ruby's Dir.glob supposedly recurses for "**" +PATHS="lib/*.rb lib/**/*.rb lib/**/**/*.rb lib/**/**/**/*.rb" + +yard doc --no-private --markup markdown --markup-provider redcarpet --embed-mixins $PATHS - README.md diff --git a/spec/feature_store_spec_base.rb b/spec/feature_store_spec_base.rb index d6c1cedc..2d06f0ff 100644 --- a/spec/feature_store_spec_base.rb +++ b/spec/feature_store_spec_base.rb @@ -1,112 +1,213 @@ require "spec_helper" -RSpec.shared_examples "feature_store" do |create_store_method| +shared_examples "feature_store" do |create_store_method, clear_data_method| - let(:feature0) { + # Rather than testing with feature flag or segment data, we'll use this fake data kind + # to make it clear that feature stores need to be able to handle arbitrary data. + let(:things_kind) { { namespace: "things" } } + + let(:key1) { "thing1" } + let(:thing1) { { - key: "test-feature-flag", + key: key1, + name: "Thing 1", version: 11, - on: true, - prerequisites: [], - salt: "718ea30a918a4eba8734b57ab1a93227", - sel: "fe1244e5378c4f99976c9634e33667c6", - targets: [ - { - values: [ "alice" ], - variation: 0 - }, - { - values: [ "bob" ], - variation: 1 - } - ], - rules: [], - fallthrough: { variation: 0 }, - offVariation: 1, - variations: [ true, false ], deleted: false } } - let(:key0) { feature0[:key].to_sym } + let(:unused_key) { "no" } + + let(:create_store) { create_store_method } # just to avoid a scope issue + let(:clear_data) { clear_data_method } + + def with_store(opts = {}) + s = create_store.call(opts) + begin + yield s + ensure + s.stop + end + end + + def with_inited_store(things) + things_hash = {} + things.each { |thing| things_hash[thing[:key].to_sym] = thing } - let!(:store) do - s = create_store_method.call() - s.init(LaunchDarkly::FEATURES => { key0 => feature0 }) - s + with_store do |s| + s.init({ things_kind => things_hash }) + yield s + end end def new_version_plus(f, deltaVersion, attrs = {}) - f1 = f.clone - f1[:version] = f[:version] + deltaVersion - f1.update(attrs) - f1 + f.clone.merge({ version: f[:version] + deltaVersion }).merge(attrs) end + before(:each) do + clear_data.call if !clear_data.nil? 
+ end - it "is initialized" do - expect(store.initialized?).to eq true + # This block of tests is only run if the clear_data method is defined, meaning that this is a persistent store + # that operates on a database that can be shared with other store instances (as opposed to the in-memory store, + # which has its own private storage). + if !clear_data_method.nil? + it "is not initialized by default" do + with_store do |store| + expect(store.initialized?).to eq false + end + end + + it "can detect if another instance has initialized the store" do + with_store do |store1| + store1.init({}) + with_store do |store2| + expect(store2.initialized?).to eq true + end + end + end + + it "can read data written by another instance" do + with_store do |store1| + store1.init({ things_kind => { key1.to_sym => thing1 } }) + with_store do |store2| + expect(store2.get(things_kind, key1)).to eq thing1 + end + end + end + + it "is independent from other stores with different prefixes" do + with_store({ prefix: "a" }) do |store_a| + store_a.init({ things_kind => { key1.to_sym => thing1 } }) + with_store({ prefix: "b" }) do |store_b| + store_b.init({ things_kind => {} }) + end + with_store({ prefix: "b" }) do |store_b1| # this ensures we're not just reading cached data + expect(store_b1.get(things_kind, key1)).to be_nil + expect(store_a.get(things_kind, key1)).to eq thing1 + end + end + end end - it "can get existing feature with symbol key" do - expect(store.get(LaunchDarkly::FEATURES, key0)).to eq feature0 + it "is initialized after calling init" do + with_inited_store([]) do |store| + expect(store.initialized?).to eq true + end end - it "can get existing feature with string key" do - expect(store.get(LaunchDarkly::FEATURES, key0.to_s)).to eq feature0 + it "can get existing item with symbol key" do + with_inited_store([ thing1 ]) do |store| + expect(store.get(things_kind, key1.to_sym)).to eq thing1 + end end - it "gets nil for nonexisting feature" do - expect(store.get(LaunchDarkly::FEATURES, 'nope')).to be_nil + it "can get existing item with string key" do + with_inited_store([ thing1 ]) do |store| + expect(store.get(things_kind, key1.to_s)).to eq thing1 + end end - it "can get all features" do - feature1 = feature0.clone - feature1[:key] = "test-feature-flag1" - feature1[:version] = 5 - feature1[:on] = false - store.upsert(LaunchDarkly::FEATURES, feature1) - expect(store.all(LaunchDarkly::FEATURES)).to eq ({ key0 => feature0, :"test-feature-flag1" => feature1 }) + it "gets nil for nonexisting item" do + with_inited_store([ thing1 ]) do |store| + expect(store.get(things_kind, unused_key)).to be_nil + end end - it "can add new feature" do - feature1 = feature0.clone - feature1[:key] = "test-feature-flag1" - feature1[:version] = 5 - feature1[:on] = false - store.upsert(LaunchDarkly::FEATURES, feature1) - expect(store.get(LaunchDarkly::FEATURES, :"test-feature-flag1")).to eq feature1 + it "returns nil for deleted item" do + deleted_thing = thing1.clone.merge({ deleted: true }) + with_inited_store([ deleted_thing ]) do |store| + expect(store.get(things_kind, key1)).to be_nil + end end - it "can update feature with newer version" do - f1 = new_version_plus(feature0, 1, { on: !feature0[:on] }) - store.upsert(LaunchDarkly::FEATURES, f1) - expect(store.get(LaunchDarkly::FEATURES, key0)).to eq f1 + it "can get all items" do + key2 = "thing2" + thing2 = { + key: key2, + name: "Thing 2", + version: 22, + deleted: false + } + with_inited_store([ thing1, thing2 ]) do |store| + expect(store.all(things_kind)).to eq ({ 
key1.to_sym => thing1, key2.to_sym => thing2 }) + end + end + + it "filters out deleted items when getting all" do + key2 = "thing2" + thing2 = { + key: key2, + name: "Thing 2", + version: 22, + deleted: true + } + with_inited_store([ thing1, thing2 ]) do |store| + expect(store.all(things_kind)).to eq ({ key1.to_sym => thing1 }) + end end - it "cannot update feature with same version" do - f1 = new_version_plus(feature0, 0, { on: !feature0[:on] }) - store.upsert(LaunchDarkly::FEATURES, f1) - expect(store.get(LaunchDarkly::FEATURES, key0)).to eq feature0 + it "can add new item" do + with_inited_store([]) do |store| + store.upsert(things_kind, thing1) + expect(store.get(things_kind, key1)).to eq thing1 + end + end + + it "can update item with newer version" do + with_inited_store([ thing1 ]) do |store| + thing1_mod = new_version_plus(thing1, 1, { name: thing1[:name] + ' updated' }) + store.upsert(things_kind, thing1_mod) + expect(store.get(things_kind, key1)).to eq thing1_mod + end + end + + it "cannot update item with same version" do + with_inited_store([ thing1 ]) do |store| + thing1_mod = thing1.clone.merge({ name: thing1[:name] + ' updated' }) + store.upsert(things_kind, thing1_mod) + expect(store.get(things_kind, key1)).to eq thing1 + end end it "cannot update feature with older version" do - f1 = new_version_plus(feature0, -1, { on: !feature0[:on] }) - store.upsert(LaunchDarkly::FEATURES, f1) - expect(store.get(LaunchDarkly::FEATURES, key0)).to eq feature0 + with_inited_store([ thing1 ]) do |store| + thing1_mod = new_version_plus(thing1, -1, { name: thing1[:name] + ' updated' }) + store.upsert(things_kind, thing1_mod) + expect(store.get(things_kind, key1)).to eq thing1 + end end - it "can delete feature with newer version" do - store.delete(LaunchDarkly::FEATURES, key0, feature0[:version] + 1) - expect(store.get(LaunchDarkly::FEATURES, key0)).to be_nil + it "can delete item with newer version" do + with_inited_store([ thing1 ]) do |store| + store.delete(things_kind, key1, thing1[:version] + 1) + expect(store.get(things_kind, key1)).to be_nil + end end - it "cannot delete feature with same version" do - store.delete(LaunchDarkly::FEATURES, key0, feature0[:version]) - expect(store.get(LaunchDarkly::FEATURES, key0)).to eq feature0 + it "cannot delete item with same version" do + with_inited_store([ thing1 ]) do |store| + store.delete(things_kind, key1, thing1[:version]) + expect(store.get(things_kind, key1)).to eq thing1 + end end - it "cannot delete feature with older version" do - store.delete(LaunchDarkly::FEATURES, key0, feature0[:version] - 1) - expect(store.get(LaunchDarkly::FEATURES, key0)).to eq feature0 + it "cannot delete item with older version" do + with_inited_store([ thing1 ]) do |store| + store.delete(things_kind, key1, thing1[:version] - 1) + expect(store.get(things_kind, key1)).to eq thing1 + end + end + + it "stores Unicode data correctly" do + flag = { + key: "my-fancy-flag", + name: "Tęst Feåtūre Flæg😺", + version: 1, + deleted: false + } + with_inited_store([]) do |store| + store.upsert(LaunchDarkly::FEATURES, flag) + expect(store.get(LaunchDarkly::FEATURES, flag[:key])).to eq flag + end end end diff --git a/spec/file_data_source_spec.rb b/spec/file_data_source_spec.rb index 60107e26..28a0c06f 100644 --- a/spec/file_data_source_spec.rb +++ b/spec/file_data_source_spec.rb @@ -219,7 +219,7 @@ def test_auto_reload(options) it "evaluates simplified flag with client as expected" do file = make_temp_file(all_properties_json) factory = LaunchDarkly::FileDataSource.factory({ 
paths: file.path }) - config = LaunchDarkly::Config.new(send_events: false, update_processor_factory: factory) + config = LaunchDarkly::Config.new(send_events: false, data_source: factory) client = LaunchDarkly::LDClient.new('sdkKey', config) begin @@ -233,7 +233,7 @@ def test_auto_reload(options) it "evaluates full flag with client as expected" do file = make_temp_file(all_properties_json) factory = LaunchDarkly::FileDataSource.factory({ paths: file.path }) - config = LaunchDarkly::Config.new(send_events: false, update_processor_factory: factory) + config = LaunchDarkly::Config.new(send_events: false, data_source: factory) client = LaunchDarkly::LDClient.new('sdkKey', config) begin diff --git a/spec/http_util.rb b/spec/http_util.rb new file mode 100644 index 00000000..764f8e48 --- /dev/null +++ b/spec/http_util.rb @@ -0,0 +1,103 @@ +require "webrick" +require "webrick/httpproxy" +require "webrick/https" + +class StubHTTPServer + attr_reader :requests + + @@next_port = 50000 + + def initialize + @port = StubHTTPServer.next_port + begin + base_opts = { + BindAddress: '127.0.0.1', + Port: @port, + AccessLog: [], + Logger: NullLogger.new, + RequestCallback: method(:record_request) + } + @server = create_server(@port, base_opts) + rescue Errno::EADDRINUSE + @port = StubHTTPServer.next_port + retry + end + @requests = [] + end + + def self.next_port + p = @@next_port + @@next_port = (p + 1 < 60000) ? p + 1 : 50000 + p + end + + def create_server(port, base_opts) + WEBrick::HTTPServer.new(base_opts) + end + + def start + Thread.new { @server.start } + end + + def stop + @server.shutdown + end + + def base_uri + URI("http://127.0.0.1:#{@port}") + end + + def setup_response(uri_path, &action) + @server.mount_proc(uri_path, action) + end + + def setup_ok_response(uri_path, body, content_type=nil, headers={}) + setup_response(uri_path) do |req, res| + res.status = 200 + res.content_type = content_type if !content_type.nil? + res.body = body + headers.each { |n, v| res[n] = v } + end + end + + def record_request(req, res) + @requests.push(req) + end +end + +class StubProxyServer < StubHTTPServer + attr_reader :request_count + attr_accessor :connect_status + + def initialize + super + @request_count = 0 + end + + def create_server(port, base_opts) + WEBrick::HTTPProxyServer.new(base_opts.merge({ + ProxyContentHandler: proc do |req,res| + if !@connect_status.nil? + res.status = @connect_status + end + @request_count += 1 + end + })) + end +end + +class NullLogger + def method_missing(*) + self + end +end + +def with_server(server = nil) + server = StubHTTPServer.new if server.nil? 
+ begin + server.start + yield server + ensure + server.stop + end +end diff --git a/spec/in_memory_feature_store_spec.rb b/spec/in_memory_feature_store_spec.rb index a1673bbc..c403fc69 100644 --- a/spec/in_memory_feature_store_spec.rb +++ b/spec/in_memory_feature_store_spec.rb @@ -1,7 +1,7 @@ require "feature_store_spec_base" require "spec_helper" -def create_in_memory_store() +def create_in_memory_store(opts = {}) LaunchDarkly::InMemoryFeatureStore.new end diff --git a/spec/integrations/consul_feature_store_spec.rb b/spec/integrations/consul_feature_store_spec.rb new file mode 100644 index 00000000..13767686 --- /dev/null +++ b/spec/integrations/consul_feature_store_spec.rb @@ -0,0 +1,41 @@ +require "feature_store_spec_base" +require "diplomat" +require "spec_helper" + + +$my_prefix = 'testprefix' +$null_log = ::Logger.new($stdout) +$null_log.level = ::Logger::FATAL + +$consul_base_opts = { + prefix: $my_prefix, + logger: $null_log +} + +def create_consul_store(opts = {}) + LaunchDarkly::Integrations::Consul::new_feature_store( + $consul_base_opts.merge(opts).merge({ expiration: 60 })) +end + +def create_consul_store_uncached(opts = {}) + LaunchDarkly::Integrations::Consul::new_feature_store( + $consul_base_opts.merge(opts).merge({ expiration: 0 })) +end + +def clear_all_data + Diplomat::Kv.delete($my_prefix + '/', recurse: true) +end + + +describe "Consul feature store" do + + # These tests will all fail if there isn't a local Consul instance running. + + context "with local cache" do + include_examples "feature_store", method(:create_consul_store), method(:clear_all_data) + end + + context "without local cache" do + include_examples "feature_store", method(:create_consul_store_uncached), method(:clear_all_data) + end +end diff --git a/spec/integrations/dynamodb_feature_store_spec.rb b/spec/integrations/dynamodb_feature_store_spec.rb new file mode 100644 index 00000000..4add3d53 --- /dev/null +++ b/spec/integrations/dynamodb_feature_store_spec.rb @@ -0,0 +1,104 @@ +require "feature_store_spec_base" +require "aws-sdk-dynamodb" +require "spec_helper" + + +$table_name = 'LD_DYNAMODB_TEST_TABLE' +$endpoint = 'http://localhost:8000' +$my_prefix = 'testprefix' +$null_log = ::Logger.new($stdout) +$null_log.level = ::Logger::FATAL + +$dynamodb_opts = { + credentials: Aws::Credentials.new("key", "secret"), + region: "us-east-1", + endpoint: $endpoint +} + +$ddb_base_opts = { + dynamodb_opts: $dynamodb_opts, + prefix: $my_prefix, + logger: $null_log +} + +def create_dynamodb_store(opts = {}) + LaunchDarkly::Integrations::DynamoDB::new_feature_store($table_name, + $ddb_base_opts.merge(opts).merge({ expiration: 60 })) +end + +def create_dynamodb_store_uncached(opts = {}) + LaunchDarkly::Integrations::DynamoDB::new_feature_store($table_name, + $ddb_base_opts.merge(opts).merge({ expiration: 0 })) +end + +def clear_all_data + client = create_test_client + items_to_delete = [] + req = { + table_name: $table_name, + projection_expression: '#namespace, #key', + expression_attribute_names: { + '#namespace' => 'namespace', + '#key' => 'key' + } + } + while true + resp = client.scan(req) + items_to_delete = items_to_delete + resp.items + break if resp.last_evaluated_key.nil? 
|| resp.last_evaluated_key.length == 0
+    req[:exclusive_start_key] = resp.last_evaluated_key
+  end
+  requests = items_to_delete.map do |item|
+    { delete_request: { key: item } }
+  end
+  LaunchDarkly::Impl::Integrations::DynamoDB::DynamoDBUtil.batch_write_requests(client, $table_name, requests)
+end
+
+def create_table_if_necessary
+  client = create_test_client
+  begin
+    client.describe_table({ table_name: $table_name })
+    return  # no error, table exists
+  rescue Aws::DynamoDB::Errors::ResourceNotFoundException
+    # fall through to code below - we'll create the table
+  end
+
+  req = {
+    table_name: $table_name,
+    key_schema: [
+      { attribute_name: "namespace", key_type: "HASH" },
+      { attribute_name: "key", key_type: "RANGE" }
+    ],
+    attribute_definitions: [
+      { attribute_name: "namespace", attribute_type: "S" },
+      { attribute_name: "key", attribute_type: "S" }
+    ],
+    provisioned_throughput: {
+      read_capacity_units: 1,
+      write_capacity_units: 1
+    }
+  }
+  client.create_table(req)
+
+  # When DynamoDB creates a table, it may not be ready to use immediately
+end
+
+def create_test_client
+  Aws::DynamoDB::Client.new($dynamodb_opts)
+end
+
+
+describe "DynamoDB feature store" do
+
+  # These tests will all fail if there isn't a local DynamoDB instance running.
+
+  create_table_if_necessary
+
+  context "with local cache" do
+    include_examples "feature_store", method(:create_dynamodb_store), method(:clear_all_data)
+  end
+
+  context "without local cache" do
+    include_examples "feature_store", method(:create_dynamodb_store_uncached), method(:clear_all_data)
  end
+end
diff --git a/spec/integrations/store_wrapper_spec.rb b/spec/integrations/store_wrapper_spec.rb
new file mode 100644
index 00000000..e7890802
--- /dev/null
+++ b/spec/integrations/store_wrapper_spec.rb
@@ -0,0 +1,276 @@
+require "spec_helper"
+
+describe LaunchDarkly::Integrations::Util::CachingStoreWrapper do
+  subject { LaunchDarkly::Integrations::Util::CachingStoreWrapper }
+
+  THINGS = { namespace: "things" }
+
+  shared_examples "tests" do |cached|
+    opts = cached ? { expiration: 30 } : { expiration: 0 }
+
+    it "gets item" do
+      core = MockCore.new
+      wrapper = subject.new(core, opts)
+      key = "flag"
+      itemv1 = { key: key, version: 1 }
+      itemv2 = { key: key, version: 2 }
+
+      core.force_set(THINGS, itemv1)
+      expect(wrapper.get(THINGS, key)).to eq itemv1
+
+      core.force_set(THINGS, itemv2)
+      expect(wrapper.get(THINGS, key)).to eq (cached ? itemv1 : itemv2)  # if cached, we will not see the new underlying value yet
+    end
+
+    it "gets deleted item" do
+      core = MockCore.new
+      wrapper = subject.new(core, opts)
+      key = "flag"
+      itemv1 = { key: key, version: 1, deleted: true }
+      itemv2 = { key: key, version: 2, deleted: false }
+
+      core.force_set(THINGS, itemv1)
+      expect(wrapper.get(THINGS, key)).to eq nil  # item is filtered out because deleted is true
+
+      core.force_set(THINGS, itemv2)
+      expect(wrapper.get(THINGS, key)).to eq (cached ? nil : itemv2)  # if cached, we will not see the new underlying value yet
+    end
+
+    it "gets missing item" do
+      core = MockCore.new
+      wrapper = subject.new(core, opts)
+      key = "flag"
+      item = { key: key, version: 1 }
+
+      expect(wrapper.get(THINGS, key)).to eq nil
+
+      core.force_set(THINGS, item)
+      expect(wrapper.get(THINGS, key)).to eq (cached ?
nil : item) # the cache can retain a nil result + end + + it "gets all items" do + core = MockCore.new + wrapper = subject.new(core, opts) + item1 = { key: "flag1", version: 1 } + item2 = { key: "flag2", version: 1 } + + core.force_set(THINGS, item1) + core.force_set(THINGS, item2) + expect(wrapper.all(THINGS)).to eq({ item1[:key] => item1, item2[:key] => item2 }) + + core.force_remove(THINGS, item2[:key]) + expect(wrapper.all(THINGS)).to eq (cached ? + { item1[:key] => item1, item2[:key] => item2 } : + { item1[:key] => item1 }) + end + + it "gets all items filtering out deleted items" do + core = MockCore.new + wrapper = subject.new(core, opts) + item1 = { key: "flag1", version: 1 } + item2 = { key: "flag2", version: 1, deleted: true } + + core.force_set(THINGS, item1) + core.force_set(THINGS, item2) + expect(wrapper.all(THINGS)).to eq({ item1[:key] => item1 }) + end + + it "upserts item successfully" do + core = MockCore.new + wrapper = subject.new(core, opts) + key = "flag" + itemv1 = { key: key, version: 1 } + itemv2 = { key: key, version: 2 } + + wrapper.upsert(THINGS, itemv1) + expect(core.data[THINGS][key]).to eq itemv1 + + wrapper.upsert(THINGS, itemv2) + expect(core.data[THINGS][key]).to eq itemv2 + + # if we have a cache, verify that the new item is now cached by writing a different value + # to the underlying data - Get should still return the cached item + if cached + itemv3 = { key: key, version: 3 } + core.force_set(THINGS, itemv3) + end + + expect(wrapper.get(THINGS, key)).to eq itemv2 + end + + it "deletes item" do + core = MockCore.new + wrapper = subject.new(core, opts) + key = "flag" + itemv1 = { key: key, version: 1 } + itemv2 = { key: key, version: 2, deleted: true } + itemv3 = { key: key, version: 3 } + + core.force_set(THINGS, itemv1) + expect(wrapper.get(THINGS, key)).to eq itemv1 + + wrapper.delete(THINGS, key, 2) + expect(core.data[THINGS][key]).to eq itemv2 + + core.force_set(THINGS, itemv3) # make a change that bypasses the cache + + expect(wrapper.get(THINGS, key)).to eq (cached ? nil : itemv3) + end + end + + context "cached" do + include_examples "tests", true + + cached_opts = { expiration: 30 } + + it "get uses values from init" do + core = MockCore.new + wrapper = subject.new(core, cached_opts) + item1 = { key: "flag1", version: 1 } + item2 = { key: "flag2", version: 1 } + + wrapper.init({ THINGS => { item1[:key] => item1, item2[:key] => item2 } }) + core.force_remove(THINGS, item1[:key]) + + expect(wrapper.get(THINGS, item1[:key])).to eq item1 + end + + it "get all uses values from init" do + core = MockCore.new + wrapper = subject.new(core, cached_opts) + item1 = { key: "flag1", version: 1 } + item2 = { key: "flag2", version: 1 } + + wrapper.init({ THINGS => { item1[:key] => item1, item2[:key] => item2 } }) + core.force_remove(THINGS, item1[:key]) + + expect(wrapper.all(THINGS)).to eq ({ item1[:key] => item1, item2[:key] => item2 }) + end + + it "upsert doesn't update cache if unsuccessful" do + # This is for an upsert where the data in the store has a higher version. In an uncached + # store, this is just a no-op as far as the wrapper is concerned so there's nothing to + # test here. In a cached store, we need to verify that the cache has been refreshed + # using the data that was found in the store. 
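# [Editor's aside — a minimal sketch, not part of the original diff: the construction
# pattern every example in this file follows. The wrapper accepts any "core" object that
# implements the *_internal methods; MockCore is defined at the bottom of this file, and
# expiration is the local cache TTL in seconds.]
core = MockCore.new
wrapper = LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, { expiration: 30 })
wrapper.upsert(THINGS, { key: "flag", version: 1 })  # writes through to the core
wrapper.get(THINGS, "flag")                          # then served from the cache until the TTL lapses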
+ core = MockCore.new + wrapper = subject.new(core, cached_opts) + key = "flag" + itemv1 = { key: key, version: 1 } + itemv2 = { key: key, version: 2 } + + wrapper.upsert(THINGS, itemv2) + expect(core.data[THINGS][key]).to eq itemv2 + + wrapper.upsert(THINGS, itemv1) + expect(core.data[THINGS][key]).to eq itemv2 # value in store remains the same + + itemv3 = { key: key, version: 3 } + core.force_set(THINGS, itemv3) # bypasses cache so we can verify that itemv2 is in the cache + expect(wrapper.get(THINGS, key)).to eq itemv2 + end + + it "initialized? can cache false result" do + core = MockCore.new + wrapper = subject.new(core, { expiration: 0.2 }) # use a shorter cache TTL for this test + + expect(wrapper.initialized?).to eq false + expect(core.inited_query_count).to eq 1 + + core.inited = true + expect(wrapper.initialized?).to eq false + expect(core.inited_query_count).to eq 1 + + sleep(0.5) + + expect(wrapper.initialized?).to eq true + expect(core.inited_query_count).to eq 2 + + # From this point on it should remain true and the method should not be called + expect(wrapper.initialized?).to eq true + expect(core.inited_query_count).to eq 2 + end + end + + context "uncached" do + include_examples "tests", false + + uncached_opts = { expiration: 0 } + + it "queries internal initialized state only if not already inited" do + core = MockCore.new + wrapper = subject.new(core, uncached_opts) + + expect(wrapper.initialized?).to eq false + expect(core.inited_query_count).to eq 1 + + core.inited = true + expect(wrapper.initialized?).to eq true + expect(core.inited_query_count).to eq 2 + + core.inited = false + expect(wrapper.initialized?).to eq true + expect(core.inited_query_count).to eq 2 + end + + it "does not query internal initialized state if init has been called" do + core = MockCore.new + wrapper = subject.new(core, uncached_opts) + + expect(wrapper.initialized?).to eq false + expect(core.inited_query_count).to eq 1 + + wrapper.init({}) + + expect(wrapper.initialized?).to eq true + expect(core.inited_query_count).to eq 1 + end + end + + class MockCore + def initialize + @data = {} + @inited = false + @inited_query_count = 0 + end + + attr_reader :data + attr_reader :inited_query_count + attr_accessor :inited + + def force_set(kind, item) + @data[kind] = {} if !@data.has_key?(kind) + @data[kind][item[:key]] = item + end + + def force_remove(kind, key) + @data[kind].delete(key) if @data.has_key?(kind) + end + + def init_internal(all_data) + @data = all_data + @inited = true + end + + def get_internal(kind, key) + items = @data[kind] + items.nil? ? nil : items[key] + end + + def get_all_internal(kind) + @data[kind] + end + + def upsert_internal(kind, item) + @data[kind] = {} if !@data.has_key?(kind) + old_item = @data[kind][item[:key]] + return old_item if !old_item.nil? && old_item[:version] >= item[:version] + @data[kind][item[:key]] = item + item + end + + def initialized_internal? 
+ @inited_query_count = @inited_query_count + 1 + @inited + end + end +end diff --git a/spec/ldclient_spec.rb b/spec/ldclient_spec.rb index 262f53f9..fca81ab0 100644 --- a/spec/ldclient_spec.rb +++ b/spec/ldclient_spec.rb @@ -7,8 +7,8 @@ let(:offline_client) do subject.new("secret", offline_config) end - let(:update_processor) { LaunchDarkly::NullUpdateProcessor.new } - let(:config) { LaunchDarkly::Config.new({send_events: false, update_processor: update_processor}) } + let(:null_data) { LaunchDarkly::NullUpdateProcessor.new } + let(:config) { LaunchDarkly::Config.new({send_events: false, data_source: null_data}) } let(:client) do subject.new("secret", config) end @@ -357,7 +357,7 @@ def event_processor end describe 'with send_events: false' do - let(:config) { LaunchDarkly::Config.new({offline: true, send_events: false, update_processor: update_processor}) } + let(:config) { LaunchDarkly::Config.new({offline: true, send_events: false, data_source: null_data}) } let(:client) { subject.new("secret", config) } it "uses a NullEventProcessor" do @@ -367,7 +367,7 @@ def event_processor end describe 'with send_events: true' do - let(:config_with_events) { LaunchDarkly::Config.new({offline: false, send_events: true, update_processor: update_processor}) } + let(:config_with_events) { LaunchDarkly::Config.new({offline: false, send_events: true, data_source: null_data}) } let(:client_with_events) { subject.new("secret", config_with_events) } it "does not use a NullEventProcessor" do @@ -375,4 +375,83 @@ def event_processor expect(ep).not_to be_a(LaunchDarkly::NullEventProcessor) end end + + describe "feature store data ordering" do + let(:dependency_ordering_test_data) { + { + LaunchDarkly::FEATURES => { + a: { key: "a", prerequisites: [ { key: "b" }, { key: "c" } ] }, + b: { key: "b", prerequisites: [ { key: "c" }, { key: "e" } ] }, + c: { key: "c" }, + d: { key: "d" }, + e: { key: "e" }, + f: { key: "f" } + }, + LaunchDarkly::SEGMENTS => { + o: { key: "o" } + } + } + } + + class FakeFeatureStore + attr_reader :received_data + + def init(all_data) + @received_data = all_data + end + end + + class FakeUpdateProcessor + def initialize(store, data) + @store = store + @data = data + end + + def start + @store.init(@data) + ev = Concurrent::Event.new + ev.set + ev + end + + def stop + end + + def initialized? 
+ true + end + end + + it "passes data set to feature store in correct order on init" do + store = FakeFeatureStore.new + data_source_factory = lambda { |sdk_key, config| FakeUpdateProcessor.new(config.feature_store, + dependency_ordering_test_data) } + config = LaunchDarkly::Config.new(send_events: false, feature_store: store, data_source: data_source_factory) + client = subject.new("secret", config) + + data = store.received_data + expect(data).not_to be_nil + expect(data.count).to eq(2) + + # Segments should always come first + expect(data.keys[0]).to be(LaunchDarkly::SEGMENTS) + expect(data.values[0].count).to eq(dependency_ordering_test_data[LaunchDarkly::SEGMENTS].count) + + # Features should be ordered so that a flag always appears after its prerequisites, if any + expect(data.keys[1]).to be(LaunchDarkly::FEATURES) + flags_map = data.values[1] + flags_list = flags_map.values + expect(flags_list.count).to eq(dependency_ordering_test_data[LaunchDarkly::FEATURES].count) + flags_list.each_with_index do |item, item_index| + (item[:prerequisites] || []).each do |prereq| + prereq = flags_map[prereq[:key].to_sym] + prereq_index = flags_list.index(prereq) + if prereq_index > item_index + all_keys = (flags_list.map { |f| f[:key] }).join(", ") + raise "#{item[:key]} depends on #{prereq[:key]}, but #{item[:key]} was listed first; keys in order are [#{all_keys}]" + end + end + end + end + end end \ No newline at end of file diff --git a/spec/redis_feature_store_spec.rb b/spec/redis_feature_store_spec.rb index d27cdb39..3da25f4f 100644 --- a/spec/redis_feature_store_spec.rb +++ b/spec/redis_feature_store_spec.rb @@ -9,13 +9,22 @@ $null_log = ::Logger.new($stdout) $null_log.level = ::Logger::FATAL +$base_opts = { + prefix: $my_prefix, + logger: $null_log +} -def create_redis_store() - LaunchDarkly::RedisFeatureStore.new(prefix: $my_prefix, logger: $null_log, expiration: 60) +def create_redis_store(opts = {}) + LaunchDarkly::RedisFeatureStore.new($base_opts.merge(opts).merge({ expiration: 60 })) end -def create_redis_store_uncached() - LaunchDarkly::RedisFeatureStore.new(prefix: $my_prefix, logger: $null_log, expiration: 0) +def create_redis_store_uncached(opts = {}) + LaunchDarkly::RedisFeatureStore.new($base_opts.merge(opts).merge({ expiration: 0 })) +end + +def clear_all_data + client = Redis.new + client.flushdb end @@ -25,16 +34,17 @@ def create_redis_store_uncached() # These tests will all fail if there isn't a Redis instance running on the default port. 
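# [Editor's aside — a minimal sketch, not part of the original diff: one way to start a
# suitable Redis and peek at what these tests write. The docker command matches the image
# used in the CI config; the hash-per-namespace key layout is an assumption about
# RedisFeatureStore's storage format, not something this diff shows.]
#
#   docker run -p 6379:6379 redis
#
require "redis"
client = Redis.new  # default host/port
client.keys("#{$my_prefix}:*")            # e.g. ["testprefix:features"]
client.hgetall("#{$my_prefix}:features")  # flag keys mapped to serialized items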
context "real Redis with local cache" do - include_examples "feature_store", method(:create_redis_store) + include_examples "feature_store", method(:create_redis_store), method(:clear_all_data) end context "real Redis without local cache" do - include_examples "feature_store", method(:create_redis_store_uncached) + include_examples "feature_store", method(:create_redis_store_uncached), method(:clear_all_data) end - def add_concurrent_modifier(store, other_client, flag, start_version, end_version) + def make_concurrent_modifier_test_hook(other_client, flag, start_version, end_version) + test_hook = Object.new version_counter = start_version - expect(store).to receive(:before_update_transaction) { |base_key, key| + expect(test_hook).to receive(:before_update_transaction) { |base_key, key| if version_counter <= end_version new_flag = flag.clone new_flag[:version] = version_counter @@ -42,18 +52,18 @@ def add_concurrent_modifier(store, other_client, flag, start_version, end_versio version_counter = version_counter + 1 end }.at_least(:once) + test_hook end it "handles upsert race condition against external client with lower version" do - store = create_redis_store other_client = Redis.new({ url: "redis://localhost:6379" }) + flag = { key: "foo", version: 1 } + test_hook = make_concurrent_modifier_test_hook(other_client, flag, 2, 4) + store = create_redis_store({ test_hook: test_hook }) begin - flag = { key: "foo", version: 1 } store.init(LaunchDarkly::FEATURES => { flag[:key] => flag }) - add_concurrent_modifier(store, other_client, flag, 2, 4) - my_ver = { key: "foo", version: 10 } store.upsert(LaunchDarkly::FEATURES, my_ver) result = store.get(LaunchDarkly::FEATURES, flag[:key]) @@ -64,15 +74,14 @@ def add_concurrent_modifier(store, other_client, flag, start_version, end_versio end it "handles upsert race condition against external client with higher version" do - store = create_redis_store other_client = Redis.new({ url: "redis://localhost:6379" }) + flag = { key: "foo", version: 1 } + test_hook = make_concurrent_modifier_test_hook(other_client, flag, 3, 3) + store = create_redis_store({ test_hook: test_hook }) begin - flag = { key: "foo", version: 1 } store.init(LaunchDarkly::FEATURES => { flag[:key] => flag }) - add_concurrent_modifier(store, other_client, flag, 3, 3) - my_ver = { key: "foo", version: 2 } store.upsert(LaunchDarkly::FEATURES, my_ver) result = store.get(LaunchDarkly::FEATURES, flag[:key]) diff --git a/spec/requestor_spec.rb b/spec/requestor_spec.rb index b7838200..7f2b8ad7 100644 --- a/spec/requestor_spec.rb +++ b/spec/requestor_spec.rb @@ -1,52 +1,58 @@ +require "http_util" require "spec_helper" -require "faraday" describe LaunchDarkly::Requestor do describe ".request_all_flags" do describe "with a proxy" do - let(:requestor) { - LaunchDarkly::Requestor.new( - "key", - LaunchDarkly::Config.new({ - :proxy => "http://proxy.com", - :base_uri => "http://ld.com" - }) - ) - } it "converts the proxy option" do - faraday = Faraday.new - requestor.instance_variable_set(:@client, faraday) - allow(faraday).to receive(:get) do |*args, &block| - req = double(Faraday::Request, :headers => {}, :options => Faraday::RequestOptions.new) - block.call(req) - expect(args).to eq ['http://ld.com/sdk/latest-all'] - expect(req.options.proxy[:uri]).to eq URI("http://proxy.com") - double(body: '{"foo": "bar"}', status: 200, headers: {}) + content = '{"flags": {"flagkey": {"key": "flagkey"}}}' + with_server do |server| + server.setup_ok_response("/sdk/latest-all", content, "application/json", { "etag" 
=> "x" }) + with_server(StubProxyServer.new) do |proxy| + config = LaunchDarkly::Config.new(base_uri: server.base_uri.to_s, proxy: proxy.base_uri.to_s) + r = LaunchDarkly::Requestor.new("sdk-key", config) + result = r.request_all_data + expect(result).to eq(JSON.parse(content, symbolize_names: true)) + end end - - requestor.request_all_data() end end describe "without a proxy" do - let(:requestor) { - LaunchDarkly::Requestor.new( - "key", - LaunchDarkly::Config.new({ - :base_uri => "http://ld.com" - }) - ) - } - it "converts the proxy option" do - faraday = Faraday.new - requestor.instance_variable_set(:@client, faraday) - allow(faraday).to receive(:get) do |*args, &block| - req = double(Faraday::Request, :headers => {}, :options => Faraday::RequestOptions.new) - block.call(req) - expect(args).to eq ['http://ld.com/sdk/latest-all'] - expect(req.options.proxy).to eq nil - double(body: '{"foo": "bar"}', status: 200, headers: {}) + it "sends headers" do + content = '{"flags": {}}' + sdk_key = 'sdk-key' + with_server do |server| + server.setup_ok_response("/sdk/latest-all", content, "application/json", { "etag" => "x" }) + r = LaunchDarkly::Requestor.new(sdk_key, LaunchDarkly::Config.new({ base_uri: server.base_uri.to_s })) + r.request_all_data + expect(server.requests.length).to eq 1 + req = server.requests[0] + expect(req.header['authorization']).to eq [sdk_key] + expect(req.header['user-agent']).to eq ["RubyClient/" + LaunchDarkly::VERSION] + end + end + + it "receives data" do + content = '{"flags": {"flagkey": {"key": "flagkey"}}}' + with_server do |server| + server.setup_ok_response("/sdk/latest-all", content, "application/json", { "etag" => "x" }) + r = LaunchDarkly::Requestor.new("sdk-key", LaunchDarkly::Config.new({ base_uri: server.base_uri.to_s })) + result = r.request_all_data + expect(result).to eq(JSON.parse(content, symbolize_names: true)) + end + end + + it "handles Unicode content" do + content = '{"flags": {"flagkey": {"key": "flagkey", "variations": ["blue", "grėeń"]}}}' + with_server do |server| + server.setup_ok_response("/sdk/latest-all", content, "application/json", { "etag" => "x" }) + # Note that the ETag header here is important because without it, the HTTP cache will not be used, + # and the cache is what required a fix to handle Unicode properly. 
See: + # https://github.com/launchdarkly/ruby-client/issues/90 + r = LaunchDarkly::Requestor.new("sdk-key", LaunchDarkly::Config.new({ base_uri: server.base_uri.to_s })) + result = r.request_all_data + expect(result).to eq(JSON.parse(content, symbolize_names: true)) end - requestor.request_all_data() end end end diff --git a/spec/sse_client/sse_client_spec.rb b/spec/sse_client/sse_client_spec.rb deleted file mode 100644 index 54f1f5c7..00000000 --- a/spec/sse_client/sse_client_spec.rb +++ /dev/null @@ -1,177 +0,0 @@ -require "spec_helper" -require "socketry" -require "sse_client/sse_shared" - -# -# End-to-end tests of SSEClient against a real server -# -describe SSE::SSEClient do - subject { SSE::SSEClient } - - def with_client(client) - begin - yield client - ensure - client.close - end - end - - it "sends expected headers" do - with_server do |server| - requests = Queue.new - server.setup_response("/") do |req,res| - requests << req - res.content_type = "text/event-stream" - res.status = 200 - end - - headers = { - "Authorization" => "secret" - } - - with_client(subject.new(server.base_uri, headers: headers)) do |client| - received_req = requests.pop - expect(received_req.header).to eq({ - "accept" => ["text/event-stream"], - "cache-control" => ["no-cache"], - "host" => ["127.0.0.1"], - "authorization" => ["secret"] - }) - end - end - end - - it "receives messages" do - events_body = <<-EOT -event: go -data: foo -id: 1 - -event: stop -data: bar - -EOT - with_server do |server| - server.setup_response("/") do |req,res| - res.content_type = "text/event-stream" - res.status = 200 - res.body = events_body - end - - event_sink = Queue.new - client = subject.new(server.base_uri) do |c| - c.on_event { |event| event_sink << event } - end - - with_client(client) do |client| - expect(event_sink.pop).to eq(SSE::SSEEvent.new(:go, "foo", "1")) - expect(event_sink.pop).to eq(SSE::SSEEvent.new(:stop, "bar", nil)) - end - end - end - - it "reconnects after error response" do - events_body = <<-EOT -event: go -data: foo - -EOT - with_server do |server| - attempt = 0 - server.setup_response("/") do |req,res| - attempt += 1 - if attempt == 1 - res.status = 500 - res.body = "sorry" - res.keep_alive = false - else - res.content_type = "text/event-stream" - res.status = 200 - res.body = events_body - end - end - - event_sink = Queue.new - error_sink = Queue.new - client = subject.new(server.base_uri, reconnect_time: 0.25) do |c| - c.on_event { |event| event_sink << event } - c.on_error { |error| error_sink << error } - end - - with_client(client) do |client| - expect(event_sink.pop).to eq(SSE::SSEEvent.new(:go, "foo", nil)) - expect(error_sink.pop).to eq({ status_code: 500, body: "sorry" }) - expect(attempt).to be >= 2 - end - end - end - - it "reconnects after read timeout" do - events_body = <<-EOT -event: go -data: foo - -EOT - with_server do |server| - attempt = 0 - server.setup_response("/") do |req,res| - attempt += 1 - if attempt == 1 - sleep(2) - end - res.content_type = "text/event-stream" - res.status = 200 - res.body = events_body - end - - event_sink = Queue.new - client = subject.new(server.base_uri, - reconnect_time: 0.25, read_timeout: 0.25) do |c| - c.on_event { |event| event_sink << event } - end - - with_client(client) do |client| - expect(event_sink.pop).to eq(SSE::SSEEvent.new(:go, "foo", nil)) - expect(attempt).to be >= 2 - end - end - end - - it "reconnects if stream returns EOF" do - events_body_1 = <<-EOT -event: go -data: foo - -EOT - events_body_2 = <<-EOT -event: go -data: bar - 
-EOT - with_server do |server| - attempt = 0 - server.setup_response("/") do |req,res| - attempt += 1 - if attempt == 1 - res.body = events_body_1 - else - res.body = events_body_2 - end - res.content_type = "text/event-stream" - res.status = 200 - end - - event_sink = Queue.new - client = subject.new(server.base_uri, - reconnect_time: 0.25, read_timeout: 0.25) do |c| - c.on_event { |event| event_sink << event } - end - - with_client(client) do |client| - expect(event_sink.pop).to eq(SSE::SSEEvent.new(:go, "foo", nil)) - expect(event_sink.pop).to eq(SSE::SSEEvent.new(:go, "bar", nil)) - expect(attempt).to be >= 2 - end - end - end -end diff --git a/spec/sse_client/sse_events_spec.rb b/spec/sse_client/sse_events_spec.rb deleted file mode 100644 index 438cfa7a..00000000 --- a/spec/sse_client/sse_events_spec.rb +++ /dev/null @@ -1,100 +0,0 @@ -require "spec_helper" - -describe SSE::EventParser do - subject { SSE::EventParser } - - it "parses an event with all fields" do - lines = [ - "event: abc\r\n", - "data: def\r\n", - "id: 1\r\n", - "\r\n" - ] - ep = subject.new(lines) - - expected_event = SSE::SSEEvent.new(:abc, "def", "1") - output = ep.items.to_a - expect(output).to eq([ expected_event ]) - end - - it "parses an event with only data" do - lines = [ - "data: def\r\n", - "\r\n" - ] - ep = subject.new(lines) - - expected_event = SSE::SSEEvent.new(:message, "def", nil) - output = ep.items.to_a - expect(output).to eq([ expected_event ]) - end - - it "parses an event with multi-line data" do - lines = [ - "data: def\r\n", - "data: ghi\r\n", - "\r\n" - ] - ep = subject.new(lines) - - expected_event = SSE::SSEEvent.new(:message, "def\nghi", nil) - output = ep.items.to_a - expect(output).to eq([ expected_event ]) - end - - it "ignores comments" do - lines = [ - ":", - "data: def\r\n", - ":", - "\r\n" - ] - ep = subject.new(lines) - - expected_event = SSE::SSEEvent.new(:message, "def", nil) - output = ep.items.to_a - expect(output).to eq([ expected_event ]) - end - - it "parses reconnect interval" do - lines = [ - "retry: 2500\r\n", - "\r\n" - ] - ep = subject.new(lines) - - expected_item = SSE::SSESetRetryInterval.new(2500) - output = ep.items.to_a - expect(output).to eq([ expected_item ]) - end - - it "parses multiple events" do - lines = [ - "event: abc\r\n", - "data: def\r\n", - "id: 1\r\n", - "\r\n", - "data: ghi\r\n", - "\r\n" - ] - ep = subject.new(lines) - - expected_event_1 = SSE::SSEEvent.new(:abc, "def", "1") - expected_event_2 = SSE::SSEEvent.new(:message, "ghi", nil) - output = ep.items.to_a - expect(output).to eq([ expected_event_1, expected_event_2 ]) - end - - it "ignores events with no data" do - lines = [ - "event: nothing\r\n", - "\r\n", - "event: nada\r\n", - "\r\n" - ] - ep = subject.new(lines) - - output = ep.items.to_a - expect(output).to eq([]) - end -end diff --git a/spec/sse_client/sse_shared.rb b/spec/sse_client/sse_shared.rb deleted file mode 100644 index 3ecabb57..00000000 --- a/spec/sse_client/sse_shared.rb +++ /dev/null @@ -1,82 +0,0 @@ -require "spec_helper" -require "webrick" -require "webrick/httpproxy" -require "webrick/https" - -class StubHTTPServer - def initialize - @port = 50000 - begin - @server = create_server(@port) - rescue Errno::EADDRINUSE - @port += 1 - retry - end - end - - def create_server(port) - WEBrick::HTTPServer.new( - BindAddress: '127.0.0.1', - Port: port, - AccessLog: [], - Logger: NullLogger.new - ) - end - - def start - Thread.new { @server.start } - end - - def stop - @server.shutdown - end - - def base_uri - 
URI("http://127.0.0.1:#{@port}") - end - - def setup_response(uri_path, &action) - @server.mount_proc(uri_path, action) - end -end - -class StubProxyServer < StubHTTPServer - attr_reader :request_count - attr_accessor :connect_status - - def initialize - super - @request_count = 0 - end - - def create_server(port) - WEBrick::HTTPProxyServer.new( - BindAddress: '127.0.0.1', - Port: port, - AccessLog: [], - Logger: NullLogger.new, - ProxyContentHandler: proc do |req,res| - if !@connect_status.nil? - res.status = @connect_status - end - @request_count += 1 - end - ) - end -end - -class NullLogger - def method_missing(*) - self - end -end - -def with_server(server = nil) - server = StubHTTPServer.new if server.nil? - begin - server.start - yield server - ensure - server.stop - end -end diff --git a/spec/sse_client/streaming_http_spec.rb b/spec/sse_client/streaming_http_spec.rb deleted file mode 100644 index 7dfac9bd..00000000 --- a/spec/sse_client/streaming_http_spec.rb +++ /dev/null @@ -1,263 +0,0 @@ -require "spec_helper" -require "socketry" -require "sse_client/sse_shared" - -# -# End-to-end tests of HTTP requests against a real server -# -describe SSE::StreamingHTTPConnection do - subject { SSE::StreamingHTTPConnection } - - def with_connection(cxn) - begin - yield cxn - ensure - cxn.close - end - end - - it "makes HTTP connection and sends request" do - with_server do |server| - requests = Queue.new - server.setup_response("/foo") do |req,res| - requests << req - res.status = 200 - end - headers = { - "Accept" => "text/plain" - } - with_connection(subject.new(server.base_uri.merge("/foo?bar"), nil, headers, 30, 30)) do - received_req = requests.pop - expect(received_req.unparsed_uri).to eq("/foo?bar") - expect(received_req.header).to eq({ - "accept" => ["text/plain"], - "host" => [server.base_uri.host] - }) - end - end - end - - it "receives response status" do - with_server do |server| - server.setup_response("/foo") do |req,res| - res.status = 204 - end - with_connection(subject.new(server.base_uri.merge("/foo"), nil, {}, 30, 30)) do |cxn| - expect(cxn.status).to eq(204) - end - end - end - - it "receives response headers" do - with_server do |server| - server.setup_response("/foo") do |req,res| - res["Content-Type"] = "application/json" - end - with_connection(subject.new(server.base_uri.merge("/foo"), nil, {}, 30, 30)) do |cxn| - expect(cxn.headers["content-type"]).to eq("application/json") - end - end - end - - it "can read response as lines" do - body = <<-EOT -This is -a response -EOT - with_server do |server| - server.setup_response("/foo") do |req,res| - res.body = body - end - with_connection(subject.new(server.base_uri.merge("/foo"), nil, {}, 30, 30)) do |cxn| - lines = cxn.read_lines - expect(lines.next).to eq("This is\n") - expect(lines.next).to eq("a response\n") - end - end - end - - it "can read entire response body" do - body = <<-EOT -This is -a response -EOT - with_server do |server| - server.setup_response("/foo") do |req,res| - res.body = body - end - with_connection(subject.new(server.base_uri.merge("/foo"), nil, {}, 30, 30)) do |cxn| - read_body = cxn.read_all - expect(read_body).to eq("This is\na response\n") - end - end - end - - it "enforces read timeout" do - with_server do |server| - server.setup_response("/") do |req,res| - sleep(2) - res.status = 200 - end - expect { subject.new(server.base_uri, nil, {}, 30, 0.25) }.to raise_error(Socketry::TimeoutError) - end - end - - it "connects to HTTP server through proxy" do - body = "hi" - with_server do |server| - 
server.setup_response("/") do |req,res| - res.body = body - end - with_server(StubProxyServer.new) do |proxy| - with_connection(subject.new(server.base_uri, proxy.base_uri, {}, 30, 30)) do |cxn| - read_body = cxn.read_all - expect(read_body).to eq("hi") - expect(proxy.request_count).to eq(1) - end - end - end - end - - it "throws error if proxy responds with error status" do - with_server do |server| - server.setup_response("/") do |req,res| - res.body = body - end - with_server(StubProxyServer.new) do |proxy| - proxy.connect_status = 403 - expect { subject.new(server.base_uri, proxy.base_uri, {}, 30, 30) }.to raise_error(SSE::ProxyError) - end - end - end - - # The following 2 tests were originally written to connect to an embedded HTTPS server made with - # WEBrick. Unfortunately, some unknown problem prevents WEBrick's self-signed certificate feature - # from working in JRuby 9.1 (but not in any other Ruby version). Therefore these tests currently - # hit an external URL. - - it "connects to HTTPS server" do - with_connection(subject.new(URI("https://app.launchdarkly.com"), nil, {}, 30, 30)) do |cxn| - expect(cxn.status).to eq 200 - end - end - - it "connects to HTTPS server through proxy" do - with_server(StubProxyServer.new) do |proxy| - with_connection(subject.new(URI("https://app.launchdarkly.com"), proxy.base_uri, {}, 30, 30)) do |cxn| - expect(cxn.status).to eq 200 - expect(proxy.request_count).to eq(1) - end - end - end -end - -# -# Tests of response parsing functionality without a real HTTP request -# -describe SSE::HTTPResponseReader do - subject { SSE::HTTPResponseReader } - - let(:simple_response) { <<-EOT -HTTP/1.1 200 OK -Cache-Control: no-cache -Content-Type: text/event-stream - -line1\r -line2 -\r -EOT - } - - def make_chunks(str) - # arbitrarily split content into 5-character blocks - str.scan(/.{1,5}/m).to_enum - end - - def mock_socket_without_timeout(chunks) - mock_socket(chunks) { :eof } - end - - def mock_socket_with_timeout(chunks) - mock_socket(chunks) { raise Socketry::TimeoutError } - end - - def mock_socket(chunks) - sock = double - allow(sock).to receive(:readpartial) do - begin - chunks.next - rescue StopIteration - yield - end - end - sock - end - - it "parses status code" do - socket = mock_socket_without_timeout(make_chunks(simple_response)) - reader = subject.new(socket, 0) - expect(reader.status).to eq(200) - end - - it "parses headers" do - socket = mock_socket_without_timeout(make_chunks(simple_response)) - reader = subject.new(socket, 0) - expect(reader.headers).to eq({ - 'cache-control' => 'no-cache', - 'content-type' => 'text/event-stream' - }) - end - - it "can read entire response body" do - socket = mock_socket_without_timeout(make_chunks(simple_response)) - reader = subject.new(socket, 0) - expect(reader.read_all).to eq("line1\r\nline2\n\r\n") - end - - it "can read response body as lines" do - socket = mock_socket_without_timeout(make_chunks(simple_response)) - reader = subject.new(socket, 0) - expect(reader.read_lines.to_a).to eq([ - "line1\r\n", - "line2\n", - "\r\n" - ]) - end - - it "handles chunked encoding" do - chunked_response = <<-EOT -HTTP/1.1 200 OK -Content-Type: text/plain -Transfer-Encoding: chunked - -6\r -things\r -A\r - and stuff\r -0\r -\r -EOT - socket = mock_socket_without_timeout(make_chunks(chunked_response)) - reader = subject.new(socket, 0) - expect(reader.read_all).to eq("things and stuff") - end - - it "raises error if response ends without complete headers" do - malformed_response = <<-EOT -HTTP/1.1 200 OK 
-Cache-Control: no-cache -EOT - socket = mock_socket_without_timeout(make_chunks(malformed_response)) - expect { subject.new(socket, 0) }.to raise_error(EOFError) - end - - it "throws timeout if thrown by socket read" do - socket = mock_socket_with_timeout(make_chunks(simple_response)) - reader = subject.new(socket, 0) - lines = reader.read_lines - lines.next - lines.next - lines.next - expect { lines.next }.to raise_error(Socketry::TimeoutError) - end -end diff --git a/spec/stream_spec.rb b/spec/stream_spec.rb index df27e173..648833ff 100644 --- a/spec/stream_spec.rb +++ b/spec/stream_spec.rb @@ -1,5 +1,5 @@ +require "ld-eventsource" require "spec_helper" -require 'ostruct' describe LaunchDarkly::StreamProcessor do subject { LaunchDarkly::StreamProcessor } @@ -8,52 +8,52 @@ let(:processor) { subject.new("sdk_key", config, requestor) } describe '#process_message' do - let(:put_message) { OpenStruct.new({data: '{"data":{"flags":{"asdf": {"key": "asdf"}},"segments":{"segkey": {"key": "segkey"}}}}'}) } - let(:patch_flag_message) { OpenStruct.new({data: '{"path": "/flags/key", "data": {"key": "asdf", "version": 1}}'}) } - let(:patch_seg_message) { OpenStruct.new({data: '{"path": "/segments/key", "data": {"key": "asdf", "version": 1}}'}) } - let(:delete_flag_message) { OpenStruct.new({data: '{"path": "/flags/key", "version": 2}'}) } - let(:delete_seg_message) { OpenStruct.new({data: '{"path": "/segments/key", "version": 2}'}) } - let(:indirect_patch_flag_message) { OpenStruct.new({data: "/flags/key"}) } - let(:indirect_patch_segment_message) { OpenStruct.new({data: "/segments/key"}) } + let(:put_message) { SSE::StreamEvent.new(:put, '{"data":{"flags":{"asdf": {"key": "asdf"}},"segments":{"segkey": {"key": "segkey"}}}}') } + let(:patch_flag_message) { SSE::StreamEvent.new(:patch, '{"path": "/flags/key", "data": {"key": "asdf", "version": 1}}') } + let(:patch_seg_message) { SSE::StreamEvent.new(:patch, '{"path": "/segments/key", "data": {"key": "asdf", "version": 1}}') } + let(:delete_flag_message) { SSE::StreamEvent.new(:delete, '{"path": "/flags/key", "version": 2}') } + let(:delete_seg_message) { SSE::StreamEvent.new(:delete, '{"path": "/segments/key", "version": 2}') } + let(:indirect_patch_flag_message) { SSE::StreamEvent.new(:'indirect/patch', "/flags/key") } + let(:indirect_patch_segment_message) { SSE::StreamEvent.new(:'indirect/patch', "/segments/key") } it "will accept PUT methods" do - processor.send(:process_message, put_message, LaunchDarkly::PUT) + processor.send(:process_message, put_message) expect(config.feature_store.get(LaunchDarkly::FEATURES, "asdf")).to eq(key: "asdf") expect(config.feature_store.get(LaunchDarkly::SEGMENTS, "segkey")).to eq(key: "segkey") end it "will accept PATCH methods for flags" do - processor.send(:process_message, patch_flag_message, LaunchDarkly::PATCH) + processor.send(:process_message, patch_flag_message) expect(config.feature_store.get(LaunchDarkly::FEATURES, "asdf")).to eq(key: "asdf", version: 1) end it "will accept PATCH methods for segments" do - processor.send(:process_message, patch_seg_message, LaunchDarkly::PATCH) + processor.send(:process_message, patch_seg_message) expect(config.feature_store.get(LaunchDarkly::SEGMENTS, "asdf")).to eq(key: "asdf", version: 1) end it "will accept DELETE methods for flags" do - processor.send(:process_message, patch_flag_message, LaunchDarkly::PATCH) - processor.send(:process_message, delete_flag_message, LaunchDarkly::DELETE) + processor.send(:process_message, patch_flag_message) + 
processor.send(:process_message, delete_flag_message)
       expect(config.feature_store.get(LaunchDarkly::FEATURES, "key")).to eq(nil)
     end
 
     it "will accept DELETE methods for segments" do
-      processor.send(:process_message, patch_seg_message, LaunchDarkly::PATCH)
-      processor.send(:process_message, delete_seg_message, LaunchDarkly::DELETE)
+      processor.send(:process_message, patch_seg_message)
+      processor.send(:process_message, delete_seg_message)
       expect(config.feature_store.get(LaunchDarkly::SEGMENTS, "key")).to eq(nil)
     end
 
     it "will accept INDIRECT PATCH method for flags" do
       flag = { key: 'key', version: 1 }
       allow(requestor).to receive(:request_flag).with(flag[:key]).and_return(flag)
-      processor.send(:process_message, indirect_patch_flag_message, LaunchDarkly::INDIRECT_PATCH);
+      processor.send(:process_message, indirect_patch_flag_message)
       expect(config.feature_store.get(LaunchDarkly::FEATURES, flag[:key])).to eq(flag)
     end
 
     it "will accept INDIRECT PATCH method for segments" do
       segment = { key: 'key', version: 1 }
       allow(requestor).to receive(:request_segment).with(segment[:key]).and_return(segment)
-      processor.send(:process_message, indirect_patch_segment_message, LaunchDarkly::INDIRECT_PATCH);
+      processor.send(:process_message, indirect_patch_segment_message)
       expect(config.feature_store.get(LaunchDarkly::SEGMENTS, segment[:key])).to eq(segment)
     end
 
     it "will log a warning if the method is not recognized" do
       expect(processor.instance_variable_get(:@config).logger).to receive :warn
-      processor.send(:process_message, put_message, "get")
+      processor.send(:process_message, SSE::StreamEvent.new(:get, "", nil))
     end
   end
 end
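
A note on the event type these specs now consume: `SSE::StreamEvent`, from the new `ld-eventsource` dependency, is a plain positional `Struct`, so keyword-style construction does not raise — it silently assigns the whole Hash to the first member. Below is a minimal sketch of the API as the migrated specs use it; it assumes the `ld-eventsource` 1.0 behavior exercised by the diff above, and the event payloads are just illustrative copies of the spec literals:

```ruby
require "ld-eventsource"

# SSE::StreamEvent has positional members (type, data, id), matching the
# literals in stream_spec.rb above.
event = SSE::StreamEvent.new(
  :patch,
  '{"path": "/flags/key", "data": {"key": "asdf", "version": 1}}',
  nil
)

event.type # => :patch
event.data # => the raw JSON payload string; StreamProcessor parses it itself
event.id   # => nil

# Caution: keyword construction on a positional Struct stuffs the entire
# Hash into the first member instead of raising, which is why the
# "unrecognized method" spec builds its event positionally.
bad = SSE::StreamEvent.new(type: :get, data: "", id: nil)
bad.type # => {:type=>:get, :data=>"", :id=>nil}
```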