# frozen_string_literal: true

# Redis-backed distributed cache for PostHog feature flag definitions.
#
# Demonstrates a FlagDefinitionCacheProvider implementation for
# multi-instance deployments (multiple containers or serverless
# functions): a leader-election lock ensures only ONE instance polls
# PostHog for flag updates while every instance shares the cached result,
# instead of N instances making N redundant API calls.
#
# Usage:
#   require 'redis'
#   require 'posthog'
#   require_relative 'redis_flag_cache'
#
#   redis = Redis.new(host: 'localhost', port: 6379)
#   cache = RedisFlagCache.new(redis, service_key: 'my-service')
#
#   posthog = PostHog::Client.new(
#     api_key: '<ph_project_api_key>',
#     personal_api_key: '<ph_personal_api_key>',
#     flag_definition_cache_provider: cache
#   )
#
# Requirements:
#   gem install redis

require 'json'
require 'securerandom'

# Leader election over Redis:
#   - the first instance to grab the lock becomes the fetcher
#   - every other instance reads from the shared cache
#   - the lock carries a TTL, so a dead leader is replaced automatically
#
# All lock operations run as Lua scripts so they are atomic, following the
# Redis distributed-lock guidance:
# https://redis.io/docs/latest/develop/clients/patterns/distributed-locks/
class RedisFlagCache
  # Lock lifetime; keep this longer than the poller's interval so an
  # active leader renews the lock before it can expire.
  LOCK_TTL_MS = 60 * 1000 # 60 seconds
  # How long cached definitions survive without being refreshed.
  CACHE_TTL_SECONDS = 60 * 60 * 24 # 24 hours

  # Acquire the lock when free, or refresh its TTL when we already own it.
  LUA_TRY_LEAD = <<~LUA
    local current = redis.call('GET', KEYS[1])
    if current == false then
      redis.call('SET', KEYS[1], ARGV[1], 'PX', ARGV[2])
      return 1
    elseif current == ARGV[1] then
      redis.call('PEXPIRE', KEYS[1], ARGV[2])
      return 1
    end
    return 0
  LUA

  # Delete the lock, but only when this instance is the owner.
  LUA_STOP_LEAD = <<~LUA
    if redis.call('GET', KEYS[1]) == ARGV[1] then
      return redis.call('DEL', KEYS[1])
    end
    return 0
  LUA

  # @param redis [Redis] a connected redis client
  # @param service_key [String] scopes the Redis keys to one service/env
  #   (e.g. "my-api-prod", "checkout-service"). Keys used:
  #     posthog:flags:{service_key}       - cached flag definitions (JSON)
  #     posthog:flags:{service_key}:lock  - leader-election lock
  def initialize(redis, service_key:)
    @redis = redis
    @cache_key = "posthog:flags:#{service_key}"
    @lock_key = "posthog:flags:#{service_key}:lock"
    # Random identity so lock ownership can be verified before release.
    @instance_id = SecureRandom.uuid
  end

  # Read the shared cache.
  #
  # @return [Hash, nil] cached flag definitions, or nil when nothing is cached
  def flag_definitions
    payload = @redis.get(@cache_key)
    payload ? JSON.parse(payload) : nil
  end

  # Atomically acquire the lock (if free) or extend it (if already ours).
  #
  # @return [Boolean] true when this instance is the leader and should
  #   fetch definitions from PostHog
  def should_fetch_flag_definitions?
    claimed = @redis.eval(LUA_TRY_LEAD, keys: [@lock_key], argv: [@instance_id, LOCK_TTL_MS.to_s])
    claimed == 1
  end

  # Persist freshly fetched definitions for the other instances to read.
  #
  # @param data [Hash] flag definitions to cache
  def on_flag_definitions_received(data)
    @redis.set(@cache_key, JSON.dump(data), ex: CACHE_TTL_SECONDS)
  end

  # Give up leadership. Safe to call even when we never held the lock.
  def shutdown
    @redis.eval(LUA_STOP_LEAD, keys: [@lock_key], argv: [@instance_id])
  end
end
# Stops the background polling TimerTask, then gives the external cache
# provider (if configured) a chance to release distributed locks.
# Provider errors are logged and swallowed so shutdown always completes.
def shutdown_poller
  @task.shutdown
  return unless @flag_definition_cache_provider

  begin
    @flag_definition_cache_provider.shutdown
  rescue StandardError => e
    logger.error("[FEATURE FLAGS] Cache provider shutdown error: #{e}")
  end
end

# Loads feature flag definitions, consulting the optional external cache
# provider before hitting the PostHog API.
#
# Decision flow:
#   1. No provider configured -> fetch from the API.
#   2. provider.should_fetch_flag_definitions? true (or raises) -> fetch.
#   3. Otherwise read provider.flag_definitions:
#      - data present               -> apply cached definitions, no API call
#      - nil + no flags loaded yet  -> emergency fallback: fetch from API
#      - nil + flags already loaded -> keep the current in-memory flags
#      - raises                     -> fall back to an API fetch
#
# All provider calls are wrapped so cache failures can never break flag
# loading; they only degrade to a direct API fetch.
def _load_feature_flags
  should_fetch = true

  if @flag_definition_cache_provider
    begin
      should_fetch = @flag_definition_cache_provider.should_fetch_flag_definitions?
    rescue StandardError => e
      logger.error("[FEATURE FLAGS] Cache provider should_fetch error: #{e}")
      # Fail-safe: when leader election is unreachable, behave as leader.
      should_fetch = true
    end
  end

  unless should_fetch
    begin
      cached_data = @flag_definition_cache_provider.flag_definitions
      if cached_data
        logger.debug '[FEATURE FLAGS] Using cached flag definitions from external cache'
        _apply_flag_definitions(cached_data)
        return
      elsif @feature_flags.empty?
        # Emergency fallback: cache empty and no flags loaded -> fetch from API
        should_fetch = true
      end
    rescue StandardError => e
      logger.error("[FEATURE FLAGS] Cache provider get error: #{e}")
      should_fetch = true
    end
  end

  _fetch_and_apply_flag_definitions if should_fetch
end
# Pushes the most recently applied definitions to the external cache
# provider (no-op when none is configured). State is converted to plain
# Array/Hash so providers can serialize it (e.g. JSON into Redis).
# Provider errors are logged; the in-memory flags are unaffected.
def _store_in_cache_provider
  return unless @flag_definition_cache_provider

  begin
    data = {
      flags: @feature_flags.to_a,
      group_type_mapping: @group_type_mapping.to_h,
      cohorts: @cohorts.to_h
    }
    @flag_definition_cache_provider.on_flag_definitions_received(data)
  rescue StandardError => e
    logger.error("[FEATURE FLAGS] Cache provider store error: #{e}")
  end
end

# Applies a set of flag definitions (from the API response or an external
# cache) to in-memory state. Accepts string OR symbol keys, because data
# round-tripped through JSON serialization comes back string-keyed; all
# nested keys are re-symbolized before use.
#
# @param data [Hash] with flags / group_type_mapping / cohorts entries
def _apply_flag_definitions(data)
  flags = get_by_symbol_or_string_key(data, 'flags') || []
  group_type_mapping = get_by_symbol_or_string_key(data, 'group_type_mapping') || {}
  cohorts = get_by_symbol_or_string_key(data, 'cohorts') || {}

  @feature_flags = Concurrent::Array.new(flags.map { |f| deep_symbolize_keys(f) })

  # Build the key index into a local first, then swap it in, so readers
  # never observe a partially populated index.
  new_by_key = {}
  @feature_flags.each do |flag|
    new_by_key[flag[:key]] = flag unless flag[:key].nil?
  end
  @feature_flags_by_key = new_by_key

  @group_type_mapping = Concurrent::Hash[deep_symbolize_keys(group_type_mapping)]
  @cohorts = Concurrent::Hash[deep_symbolize_keys(cohorts)]

  logger.debug "Loaded #{@feature_flags.length} feature flags and #{@cohorts.length} cohorts"
  @loaded_flags_successfully_once.make_true if @loaded_flags_successfully_once.false?
end
module PostHog
  # Duck-typed interface for external (distributed) caching of feature
  # flag definitions.
  #
  # EXPERIMENTAL: This API may change in future minor version bumps.
  #
  # Enables multi-worker environments (Kubernetes, load-balanced servers,
  # serverless functions) to share flag definitions via an external cache,
  # reducing redundant API calls. Implement the four required methods on
  # any object and pass it as the +:flag_definition_cache_provider+ option
  # when creating a {Client}.
  #
  # [+flag_definitions+]
  #   Return the cached definitions Hash (+:flags+, +:group_type_mapping+,
  #   +:cohorts+; string keys from JSON round-trips are accepted too), or
  #   +nil+ when the cache is empty. Returning +nil+ triggers an API fetch
  #   when no flags are loaded yet (emergency fallback).
  # [+should_fetch_flag_definitions?+]
  #   Return +true+ when this instance should fetch fresh definitions from
  #   the API, +false+ to read from the shared cache. Use for distributed
  #   lock coordination so only one worker fetches at a time.
  # [+on_flag_definitions_received(data)+]
  #   Called after a successful API fetch with a plain-Ruby Hash
  #   (+:flags+, +:group_type_mapping+, +:cohorts+); store it externally.
  # [+shutdown+]
  #   Called when the client shuts down; release locks and clean up.
  #
  # Callers wrap every provider call in begin/rescue, so provider errors
  # are logged but never break flag evaluation: should_fetch errors
  # default to fetching, read errors fall back to the API, write/shutdown
  # errors are logged and ignored.
  module FlagDefinitionCacheProvider
    # The complete required interface, consumed by {validate!}.
    REQUIRED_METHODS = %i[
      flag_definitions
      should_fetch_flag_definitions?
      on_flag_definitions_received
      shutdown
    ].freeze

    # Validates that +provider+ responds to every required method.
    #
    # @param provider [Object] candidate cache provider
    # @raise [ArgumentError] naming each missing method
    # @return [nil] when the provider is valid
    def self.validate!(provider)
      missing = REQUIRED_METHODS.reject { |m| provider.respond_to?(m) }
      return if missing.empty?

      raise ArgumentError,
            "Flag definition cache provider is missing required methods: #{missing.join(', ')}. " \
            'See PostHog::FlagDefinitionCacheProvider for the required interface.'
    end
  end
end

# public: Recursively convert hash keys to symbols throughout a Hash/Array
# tree (defined in PostHog::Utils in the full file).
#
# Fix over the original: keys that do not respond to +to_sym+ (e.g.
# Integer cohort ids in provider-built cache data) are passed through
# unchanged instead of raising NoMethodError. String/symbol keys behave
# exactly as before.
def deep_symbolize_keys(obj)
  case obj
  when Hash
    obj.each_with_object({}) do |(key, value), result|
      sym_key = key.respond_to?(:to_sym) ? key.to_sym : key
      result[sym_key] = deep_symbolize_keys(value)
    end
  when Array
    obj.map { |item| deep_symbolize_keys(item) }
  else
    obj
  end
end
# frozen_string_literal: true

require 'spec_helper'

# MockCacheProvider is scoped outside PostHog module to avoid polluting the production namespace.
# Records call counts for every interface method and can be configured to
# raise from any of them, so the specs can exercise each failure path.
class MockCacheProvider
  attr_accessor :stored_data, :should_fetch_return_value,
                :should_fetch_error, :get_error, :on_received_error, :shutdown_error
  attr_reader :get_call_count, :should_fetch_call_count, :on_received_call_count, :shutdown_call_count

  def initialize
    @stored_data = nil
    @should_fetch_return_value = true
    @get_call_count = 0
    @should_fetch_call_count = 0
    @on_received_call_count = 0
    @shutdown_call_count = 0
    @should_fetch_error = nil
    @get_error = nil
    @on_received_error = nil
    @shutdown_error = nil
  end

  def flag_definitions
    @get_call_count += 1
    raise @get_error if @get_error

    @stored_data
  end

  def should_fetch_flag_definitions?
    @should_fetch_call_count += 1
    raise @should_fetch_error if @should_fetch_error

    @should_fetch_return_value
  end

  def on_flag_definitions_received(data)
    @on_received_call_count += 1
    raise @on_received_error if @on_received_error

    @stored_data = data
  end

  def shutdown
    @shutdown_call_count += 1
    raise @shutdown_error if @shutdown_error
  end
end

module PostHog
  describe FlagDefinitionCacheProvider do
    describe '.validate!' do
      it 'passes for a complete provider' do
        provider = MockCacheProvider.new
        expect { FlagDefinitionCacheProvider.validate!(provider) }.not_to raise_error
      end

      it 'raises ArgumentError for an object missing all methods' do
        provider = Object.new
        expect { FlagDefinitionCacheProvider.validate!(provider) }.to raise_error(
          ArgumentError,
          /missing required methods.*(flag_definitions|should_fetch|on_flag|shutdown)/
        )
      end

      it 'raises ArgumentError listing only the missing methods' do
        provider = Object.new
        def provider.flag_definitions; end
        def provider.shutdown; end

        expect { FlagDefinitionCacheProvider.validate!(provider) }.to raise_error(ArgumentError) do |error|
          # Extract and parse the missing methods list
          missing_str = error.message.split('missing required methods: ').last.split('.').first
          missing_methods = missing_str.split(', ').map(&:strip)
          expect(missing_methods).to include('should_fetch_flag_definitions?')
          expect(missing_methods).to include('on_flag_definitions_received')
          # Verify the implemented methods are NOT listed as missing
          expect(missing_methods).not_to include('flag_definitions')
          expect(missing_methods).not_to include('shutdown')
        end
      end
    end
  end

  describe 'flag definition cache integration' do
    let(:provider) { MockCacheProvider.new }
    let(:local_eval_url) { 'https://app.posthog.com/api/feature_flag/local_evaluation?token=testsecret&send_cohorts=true' }

    # Sample flag data with string keys (simulating JSON deserialization from cache)
    let(:sample_flags_data) do
      {
        'flags' => [
          {
            'id' => 1,
            'key' => 'test-flag',
            'active' => true,
            'filters' => {
              'groups' => [
                {
                  'properties' => [
                    { 'key' => 'region', 'operator' => 'exact', 'value' => ['USA'], 'type' => 'person' }
                  ],
                  'rollout_percentage' => 100
                }
              ]
            }
          },
          { 'id' => 2, 'key' => 'disabled-flag', 'active' => false, 'filters' => {} }
        ],
        'group_type_mapping' => { '0' => 'company', '1' => 'project' },
        'cohorts' => { '1' => { 'type' => 'AND', 'values' => [] } }
      }
    end

    # Builds a client wired to the mock provider; stub_api: false lets a
    # spec install its own webmock stub first.
    def create_client_with_cache(provider:, stub_api: true)
      if stub_api
        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
      end
      Client.new(
        api_key: API_KEY,
        personal_api_key: API_KEY,
        test_mode: true,
        flag_definition_cache_provider: provider
      )
    end

    def get_poller(client)
      client.instance_variable_get(:@feature_flags_poller)
    end

    describe 'cache initialization' do
      it 'uses cached data when should_fetch? returns false and cache has data' do
        provider.should_fetch_return_value = false
        provider.stored_data = sample_flags_data

        # The initial load_feature_flags call should use cache, not API
        stub = stub_request(:get, local_eval_url)
               .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        # API should not have been called (initial load uses cache)
        expect(stub).not_to have_been_requested

        poller = get_poller(client)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)
        expect(provider.get_call_count).to be >= 1
      end

      it 'fetches from API when should_fetch? returns true' do
        provider.should_fetch_return_value = true

        stub = stub_request(:get, local_eval_url)
               .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        expect(stub).to have_been_requested
        poller = get_poller(client)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)
      end

      it 'uses emergency fallback when cache is empty and no flags loaded' do
        provider.should_fetch_return_value = false
        provider.stored_data = nil # Cache empty

        stub = stub_request(:get, local_eval_url)
               .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        # Should have fallen back to API
        expect(stub).to have_been_requested
        poller = get_poller(client)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)
      end

      it 'preserves existing flags when cache returns nil but flags already loaded' do
        provider.should_fetch_return_value = true

        stub = stub_request(:get, local_eval_url)
               .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        poller = get_poller(client)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)

        # Now simulate: should_fetch false, cache nil, but flags already loaded
        provider.should_fetch_return_value = false
        provider.stored_data = nil

        poller.send(:_load_feature_flags)
        # Flags should be preserved (no emergency fallback since flags exist)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)
        # API should have been called only once (during init), not during the second load
        expect(stub).to have_been_requested.once
      end
    end

    describe 'fetch coordination' do
      it 'calls should_fetch? before each poll cycle' do
        provider.should_fetch_return_value = true

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider)

        initial_count = provider.should_fetch_call_count

        poller = get_poller(client)
        poller.send(:_load_feature_flags)

        expect(provider.should_fetch_call_count).to eq(initial_count + 1)
      end

      it 'stores data in cache after API fetch' do
        provider.should_fetch_return_value = true

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        create_client_with_cache(provider: provider)

        expect(provider.on_received_call_count).to be >= 1
        expect(provider.stored_data).not_to be_nil
        expect(provider.stored_data[:flags].length).to eq(2)
        expect(provider.stored_data[:group_type_mapping]).to be_a(Hash)
        expect(provider.stored_data[:cohorts]).to be_a(Hash)
      end

      it 'does not call on_flag_definitions_received when cache is used' do
        provider.should_fetch_return_value = true

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider)

        initial_on_received_count = provider.on_received_call_count

        # Now use cache
        provider.should_fetch_return_value = false

        poller = get_poller(client)
        poller.send(:_load_feature_flags)

        expect(provider.on_received_call_count).to eq(initial_on_received_count)
      end

      it 'does not update cache on 304 Not Modified' do
        provider.should_fetch_return_value = true

        # First call: return flags
        stub_request(:get, local_eval_url)
          .to_return(
            { status: 200, body: sample_flags_data.to_json, headers: { 'ETag' => 'abc123' } },
            { status: 304, body: '', headers: { 'ETag' => 'abc123' } }
          )
        client = create_client_with_cache(provider: provider, stub_api: false)

        on_received_after_init = provider.on_received_call_count

        # Second call: 304
        poller = get_poller(client)
        poller.send(:_load_feature_flags)

        expect(provider.on_received_call_count).to eq(on_received_after_init)
      end
    end

    describe 'error handling' do
      it 'defaults to fetching from API when should_fetch? raises' do
        provider.should_fetch_error = RuntimeError.new('Redis connection error')

        stub = stub_request(:get, local_eval_url)
               .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        expect(stub).to have_been_requested
        poller = get_poller(client)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)
      end

      it 'falls back to API fetch when flag_definitions raises' do
        provider.should_fetch_return_value = false
        provider.get_error = RuntimeError.new('Redis timeout')

        stub = stub_request(:get, local_eval_url)
               .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        expect(stub).to have_been_requested
        poller = get_poller(client)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)
      end

      it 'keeps flags in memory when on_flag_definitions_received raises' do
        provider.should_fetch_return_value = true
        provider.on_received_error = RuntimeError.new('Redis write error')

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider)

        poller = get_poller(client)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)
      end

      it 'continues shutdown when provider shutdown raises' do
        provider.should_fetch_return_value = true
        provider.shutdown_error = RuntimeError.new('Redis error')

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider)

        expect { client.shutdown }.not_to raise_error
        expect(provider.shutdown_call_count).to eq(1)
      end
    end

    describe 'shutdown lifecycle' do
      it 'calls provider shutdown via client shutdown' do
        provider.should_fetch_return_value = true

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider)

        client.shutdown
        expect(provider.shutdown_call_count).to eq(1)
      end
    end

    describe 'backward compatibility' do
      it 'works without a cache provider' do
        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)

        client = Client.new(
          api_key: API_KEY,
          personal_api_key: API_KEY,
          test_mode: true
        )

        poller = client.instance_variable_get(:@feature_flags_poller)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)
      end
    end

    describe 'data integrity' do
      it 'evaluates flags loaded from cache' do
        provider.should_fetch_return_value = false
        provider.stored_data = sample_flags_data

        stub = stub_request(:get, local_eval_url)
               .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        expect(stub).not_to have_been_requested

        result = client.get_feature_flag(
          'test-flag', 'some-user',
          person_properties: { 'region' => 'USA' },
          only_evaluate_locally: true
        )
        expect(result).to eq(true)

        result = client.get_feature_flag(
          'disabled-flag', 'some-user',
          only_evaluate_locally: true
        )
        expect(result).to eq(false)
      end

      it 'handles string-keyed cache data correctly' do
        provider.should_fetch_return_value = false
        provider.stored_data = sample_flags_data

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        poller = get_poller(client)
        flags_by_key = poller.instance_variable_get(:@feature_flags_by_key)
        expect(flags_by_key).to have_key('test-flag')
        expect(flags_by_key['test-flag'][:active]).to eq(true)
      end

      it 'loads group_type_mapping from cache' do
        provider.should_fetch_return_value = false
        provider.stored_data = sample_flags_data

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        poller = get_poller(client)
        mapping = poller.instance_variable_get(:@group_type_mapping)
        expect(mapping[:'0']).to eq('company')
        expect(mapping[:'1']).to eq('project')
      end

      it 'loads cohorts from cache' do
        provider.should_fetch_return_value = false
        provider.stored_data = sample_flags_data

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        poller = get_poller(client)
        cohorts = poller.instance_variable_get(:@cohorts)
        expect(cohorts[:'1']).to be_a(Hash)
        expect(cohorts[:'1'][:type]).to eq('AND')
      end

      it 'updates cache when API returns new data' do
        provider.should_fetch_return_value = true

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        create_client_with_cache(provider: provider)

        expect(provider.stored_data).not_to be_nil
        expect(provider.stored_data[:flags].length).to eq(2)
        expect(provider.stored_data[:flags].first[:key]).to eq('test-flag')
      end

      it 'roundtrip: data stored after API fetch can be loaded via JSON serialization' do
        provider.should_fetch_return_value = true

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: sample_flags_data.to_json)
        client1 = create_client_with_cache(provider: provider, stub_api: false)
        client1.shutdown

        # Simulate what a real cache (e.g., Redis with JSON serialization) would do:
        # JSON.parse(JSON.dump(data)) converts symbol keys back to strings
        serialized_data = JSON.parse(JSON.dump(provider.stored_data))

        # Create a second "instance" that reads from cache.
        # Stub the API with EMPTY flags so we can distinguish cache vs API results:
        # if cache works, 'test-flag' evaluates to true; if API is used, it returns nil.
        provider2 = MockCacheProvider.new
        provider2.should_fetch_return_value = false
        provider2.stored_data = serialized_data

        stub_request(:get, local_eval_url)
          .to_return(status: 200, body: { 'flags' => [], 'group_type_mapping' => {}, 'cohorts' => {} }.to_json)
        client2 = create_client_with_cache(provider: provider2, stub_api: false)

        expect(provider2.get_call_count).to be >= 1

        result = client2.get_feature_flag(
          'test-flag', 'some-user',
          person_properties: { 'region' => 'USA' },
          only_evaluate_locally: true
        )
        expect(result).to eq(true)
      end

      it 'picks up updated cache data on subsequent poll cycles' do
        provider.should_fetch_return_value = false
        provider.stored_data = sample_flags_data

        stub = stub_request(:get, local_eval_url)
               .to_return(status: 200, body: sample_flags_data.to_json)
        client = create_client_with_cache(provider: provider, stub_api: false)

        poller = get_poller(client)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(2)

        # Simulate leader updating cache with a new flag
        updated_flags = sample_flags_data['flags'] + [
          { 'id' => 3, 'key' => 'new-flag', 'active' => true, 'filters' => {} }
        ]
        provider.stored_data = sample_flags_data.merge('flags' => updated_flags)

        poller.send(:_load_feature_flags)
        expect(poller.instance_variable_get(:@feature_flags).length).to eq(3)
        expect(stub).not_to have_been_requested
      end
    end
  end
end