Commit 7d1b5fe5 authored by Sylvester Chin, committed by Bob Van Landuyt

Add cross-slot pipeline functionality

This MR adds the Gitlab::Redis::CrossSlot module to wrap .pipelined
calls at relevant call sites.

Changelog: performance
parent 2ba2bd31
Related merge requests: !120251 "Update cache workload to be Redis Cluster compatible", !119439 "Draft: Prevent file variable content expansion in downstream pipeline"
Showing 317 additions and 94 deletions
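For orientation, a minimal usage sketch of the wrapper this commit introduces, assuming a cache store that may be backed by Redis Cluster; the key names are purely illustrative, and the return value mirrors redis-rb's pipelined (one reply per queued command, in order):

# Illustrative sketch only -- the keys below are hypothetical.
Gitlab::Redis::Cache.with do |redis|
  Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
    # On a Redis Cluster client the commands are grouped per node and fanned out;
    # on a standalone Redis this falls through to redis-rb's own .pipelined.
    Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
      pipeline.get('{user1}:name')
      pipeline.get('{user2}:name')
    end
  end
end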
......@@ -6,6 +6,7 @@
Redis::Client.prepend(Gitlab::Instrumentation::RedisInterceptor)
Redis::Cluster::NodeLoader.prepend(Gitlab::Patch::NodeLoader)
Redis::Cluster.prepend(Gitlab::Patch::RedisCluster)
# Make sure we initialize a Redis connection pool before multi-threaded
# execution starts by
......
......@@ -3,6 +3,9 @@ development:
chat:
cluster:
- redis://localhost:7001
cache:
cluster:
- redis://localhost:7001
rate_limiting:
cluster:
- redis://localhost:7001
......@@ -11,6 +14,9 @@ test:
chat:
cluster:
- redis://localhost:7001
cache:
cluster:
- redis://localhost:7001
rate_limiting:
cluster:
- redis://localhost:7001
......@@ -65,14 +65,8 @@ def delete_by_email(*emails)
keys = emails.map { |email| email_key(email) }
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
if ::Feature.enabled?(:use_pipeline_over_multikey)
expired_count = 0
keys.each_slice(1000) do |subset|
expired_count += redis.pipelined do |pipeline|
subset.each { |key| pipeline.unlink(key) }
end.sum
end
expired_count
if ::Feature.enabled?(:use_pipeline_over_multikey) || Gitlab::Redis::ClusterUtil.cluster?(redis)
Gitlab::Redis::ClusterUtil.batch_unlink(keys, redis)
else
redis.unlink(*keys)
end
......
......@@ -162,13 +162,13 @@ def self.values_from_set(raw_key)
def self.write_multiple(mapping, key_prefix: nil, timeout: TIMEOUT)
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis.pipelined do |multi|
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
mapping.each do |raw_key, value|
key = cache_key_for("#{key_prefix}#{raw_key}")
validate_redis_value!(value)
multi.set(key, value, ex: timeout)
pipeline.set(key, value, ex: timeout)
end
end
end
......
......@@ -16,7 +16,7 @@ class << self
def write_multiple(mapping)
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis.pipelined do |pipelined|
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipelined|
mapping.each do |raw_key, value|
key = cache_key_for(raw_key)
......@@ -41,8 +41,8 @@ def read_multiple(raw_keys)
content =
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
if ::Feature.enabled?(:use_pipeline_over_multikey)
redis.pipelined do |pipeline|
if ::Feature.enabled?(:use_pipeline_over_multikey) || Gitlab::Redis::ClusterUtil.cluster?(redis)
Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
keys.each { |key| pipeline.get(key) }
end
else
......@@ -72,10 +72,8 @@ def clear_multiple(raw_keys)
with_redis do |redis|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
if ::Feature.enabled?(:use_pipeline_over_multikey)
redis.pipelined do |pipeline|
keys.each { |key| pipeline.del(key) }
end.sum
if ::Feature.enabled?(:use_pipeline_over_multikey) || Gitlab::Redis::ClusterUtil.cluster?(redis)
Gitlab::Redis::ClusterUtil.batch_unlink(keys, redis)
else
redis.del(keys)
end
......
......@@ -11,7 +11,7 @@ def self.bulk_read(subjects)
Gitlab::Redis::Cache.with do |r|
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
r.pipelined do |pipeline|
Gitlab::Redis::CrossSlot::Pipeline.new(r).pipelined do |pipeline|
subjects.each do |subject|
results[subject.cache_key] = new(subject).read(pipeline)
end
......
......@@ -3,14 +3,12 @@
module Gitlab
module Patch
module RedisCacheStore
PATCH_INPUT_LIMIT = 100
PIPELINE_BATCH_SIZE = 100
# We will try to keep the patched code explicit and matching the original signature in
# https://github.com/rails/rails/blob/v6.1.7.2/activesupport/lib/active_support/cache/redis_cache_store.rb#L361
def read_multi_mget(*names) # rubocop:disable Style/ArgumentsForwarding
return super unless enable_rails_cache_pipeline_patch?
return super if names.size > PATCH_INPUT_LIMIT # avoid excessive apdex degradation during benchmarking exercise
patched_read_multi_mget(*names) # rubocop:disable Style/ArgumentsForwarding
end
......@@ -23,7 +21,7 @@ def delete_multi_entries(entries, **options)
delete_count = 0
redis.with do |conn|
entries.each_slice(PIPELINE_BATCH_SIZE) do |subset|
delete_count += conn.pipelined do |pipeline|
delete_count += Gitlab::Redis::CrossSlot::Pipeline.new(conn).pipelined do |pipeline|
subset.each { |entry| pipeline.del(entry) }
end.sum
end
......@@ -58,14 +56,19 @@ def patched_read_multi_mget(*names)
end
def pipeline_mget(conn, keys)
conn.pipelined do |p|
keys.each { |key| p.get(key) }
keys.each_slice(PIPELINE_BATCH_SIZE).flat_map do |subset|
Gitlab::Redis::CrossSlot::Pipeline.new(conn).pipelined do |p|
subset.each { |key| p.get(key) }
end
end
end
private
def enable_rails_cache_pipeline_patch?
# if we do not enable the patch, the application will be susceptible to cross-slot errors
return true if redis.with { |c| ::Gitlab::Redis::ClusterUtil.cluster?(c) }
redis_cache? &&
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands? &&
::Feature.enabled?(:enable_rails_cache_pipeline_patch) # rubocop:disable Cop/FeatureFlagUsage
......
# frozen_string_literal: true
# Patch to expose the private `find_node_key` method for cross-slot pipelining.
# In redis v5.0.x, cross-slot pipelining is implemented via redis-cluster-client,
# so once redis-rb is upgraded this patch should be removed as it will no longer be needed.
# Gitlab::Redis::CrossSlot and its usage should be removed as well.
if Gem::Version.new(Redis::VERSION) != Gem::Version.new('4.8.0')
raise 'New version of redis detected, please remove or update this patch'
end
module Gitlab
module Patch
module RedisCluster
# _find_node_key exposes a private function of the same name in Redis::Cluster.
# See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/cluster.rb#L282
def _find_node_key(command)
find_node_key(command)
end
end
end
end
......@@ -11,17 +11,14 @@ def initialize(expires_in: 10.minutes)
end
def clear_cache!(key)
use_pipeline = ::Feature.enabled?(:use_pipeline_over_multikey)
with do |redis|
keys = read(key).map { |value| "#{cache_namespace}:#{value}" }
keys << cache_key(key)
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
if ::Feature.enabled?(:use_pipeline_over_multikey)
keys.each_slice(1000) do |subset|
redis.pipelined do |pipeline|
subset.each { |key| pipeline.unlink(key) }
end
end
if use_pipeline || Gitlab::Redis::ClusterUtil.cluster?(redis)
Gitlab::Redis::ClusterUtil.batch_unlink(keys, redis)
else
redis.pipelined do |pipeline|
keys.each_slice(1000) { |subset| pipeline.unlink(*subset) }
......
# frozen_string_literal: true
module Gitlab
module Redis
module ClusterUtil
class << self
# cluster? is used to select the Redis command type: when it returns `true`, the
# subsequent commands should be compatible with Redis Cluster.
#
# When working with MultiStore, if even 1 of the 2 stores is a Redis::Cluster,
# we err on the side of caution and return `true`.
def cluster?(obj)
if obj.is_a?(MultiStore)
cluster?(obj.primary_store) || cluster?(obj.secondary_store)
else
obj.respond_to?(:_client) && obj._client.is_a?(::Redis::Cluster)
end
end
def batch_unlink(keys, redis)
expired_count = 0
keys.each_slice(1000) do |subset|
expired_count += Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
subset.each { |key| pipeline.unlink(key) }
end.sum
end
expired_count
end
end
end
end
end
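For illustration, a sketch of the call-site pattern this helper enables; the key list is hypothetical, and the branch mirrors the call sites changed elsewhere in this commit:

# Illustrative sketch only -- the keys below are hypothetical.
Gitlab::Redis::Cache.with do |redis|
  keys = %w[session:a session:b session:c]

  Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
    if Gitlab::Redis::ClusterUtil.cluster?(redis)
      # Pipelines UNLINKs in slices of 1000 keys and returns the number removed.
      Gitlab::Redis::ClusterUtil.batch_unlink(keys, redis)
    else
      # A single multi-key UNLINK is fine when all keys live on one node.
      redis.unlink(*keys)
    end
  end
end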
# frozen_string_literal: true
module Gitlab
module Redis
module CrossSlot
class Router
attr_reader :node_mapping, :futures, :node_sequence, :cmd_queue
delegate :respond_to_missing?, to: :@redis
# This map contains redis-rb methods which do not map directly
# to a standard Redis command. It is used to transform such methods into
# standard commands in order to find the node key.
#
# Redis::Cluster::Command only contains details of commands which the Redis Server
# returns. Hence, commands like mapped_hmget and hscan_each internally will call the
# base command, hmget and hscan respectively.
#
# See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/cluster/command.rb
UNSUPPORTED_CMD_MAPPING = {
# Internally, redis-rb calls the supported Redis command and transforms the output.
# See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/commands/hashes.rb#L104
mapped_hmget: :hmget
}.freeze
# Initializes the CrossSlot::Router
# @param {::Redis}
def initialize(redis)
@redis = redis
@node_mapping = {}
@futures = {}
@node_sequence = []
@cmd_queue = []
end
# For now we intercept every redis call and return a Gitlab-defined Future object.
# This method groups commands by node for fan-out. Commands are grouped using the first key.
#
# rubocop:disable Style/MissingRespondToMissing
def method_missing(cmd, *args, **kwargs, &blk)
# Note that we can re-map the command without affecting execution as it is
# solely for finding the node key. The original cmd will be executed.
node = @redis._client._find_node_key([UNSUPPORTED_CMD_MAPPING.fetch(cmd, cmd)] + args)
@node_mapping[node] ||= []
@futures[node] ||= []
@node_sequence << node
@node_mapping[node] << [cmd, args, kwargs || {}, blk]
f = Future.new
@futures[node] << f
@cmd_queue << [f, cmd, args, kwargs || {}, blk]
f
end
# rubocop:enable Style/MissingRespondToMissing
end
# Wraps over redis-rb's Future in
# https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis/pipeline.rb#L244
class Future
def set(future, is_val = false)
@redis_future = future
@is_val = is_val
end
def value
return @redis_future if @is_val
@redis_future.value
end
end
# Pipeline allows cross-slot pipelined commands to be performed. The fan-out logic is implemented in
# https://github.com/redis-rb/redis-cluster-client/blob/master/lib/redis_client/cluster/pipeline.rb
# which is available in redis-rb v5.0.
#
# This file can be deprecated after redis-rb v4.8.0 is upgraded to v5.0
class Pipeline
# Initializes the CrossSlot::Pipeline
# @param {::Redis}
def initialize(redis)
@redis = redis
end
# pipelined is used in place of ::Redis `.pipelined` when running in a cluster context
# where cross-slot operations may happen.
def pipelined(&block)
# Directly call .pipelined and defer the pipeline execution to MultiStore.
# MultiStore could wrap 0, 1, or 2 Redis Cluster clients; handling it here
# would not work for 2 clients since the key-slot topology can differ.
if use_cross_slot_pipelining?
router = Router.new(@redis)
yield router
execute_commands(router)
else
# use redis-rb's pipelined method
@redis.pipelined(&block)
end
end
private
def use_cross_slot_pipelining?
!@redis.instance_of?(::Gitlab::Redis::MultiStore) && @redis._client.instance_of?(::Redis::Cluster)
end
def execute_commands(router)
router.node_mapping.each do |node_key, commands|
# TODO: possibly use threads to speed this up, but for now `n` is 3-5, which is small.
@redis.pipelined do |p|
commands.each_with_index do |command, idx|
future = router.futures[node_key][idx]
cmd, args, kwargs, blk = command
future.set(p.public_send(cmd, *args, **kwargs, &blk)) # rubocop:disable GitlabSecurity/PublicSend
end
end
end
router.node_sequence.map do |node_key|
router.futures[node_key].shift.value
end
rescue ::Redis::CommandError => err
if err.message.start_with?('MOVED', 'ASK')
Gitlab::ErrorTracking.log_exception(err)
return execute_commands_sequentially(router)
end
raise
end
def execute_commands_sequentially(router)
router.cmd_queue.map do |command|
future, cmd, args, kwargs, blk = command
future.set(@redis.public_send(cmd, *args, **kwargs, &blk), true) # rubocop:disable GitlabSecurity/PublicSend
future.value
end
end
end
end
end
end
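To illustrate UNSUPPORTED_CMD_MAPPING above, a sketch of pipelining a redis-rb helper that has no direct server-side counterpart; the hash key and fields are hypothetical. The node key is looked up as if the command were hmget, while the original mapped_hmget is what actually executes:

# Illustrative sketch only -- the hash key and fields are hypothetical.
Gitlab::Redis::Cache.with do |redis|
  Gitlab::Redis::CrossSlot::Pipeline.new(redis).pipelined do |pipeline|
    # Routed via the :mapped_hmget => :hmget entry, executed as mapped_hmget.
    pipeline.mapped_hmget('user:42:profile', 'name', 'email')
  end
end
# Replies come back in command order, here a single field => value hash.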
......@@ -71,6 +71,8 @@ def message
setnx
srem
unlink
memory
].freeze
PIPELINED_COMMANDS = %i[
......@@ -122,7 +124,7 @@ def initialize(primary_store, secondary_store, instance_name)
if use_primary_and_secondary_stores?
pipelined_both(name, *args, **kwargs, &block)
else
default_store.send(name, *args, **kwargs, &block)
send_command(default_store, name, *args, **kwargs, &block)
end
end
end
......@@ -289,6 +291,16 @@ def same_redis_store?
# rubocop:disable GitlabSecurity/PublicSend
def send_command(redis_instance, command_name, *args, **kwargs, &block)
# Run wrapped pipeline for each instance individually so that the fan-out is distinct.
# If both primary and secondary are Redis Clusters, the slot-node distribution could
# be different.
#
# We ignore args and kwargs since `pipelined` does not accept arguments
# See https://github.com/redis/redis-rb/blob/v4.8.0/lib/redis.rb#L164
if command_name.to_s == 'pipelined' && redis_instance._client.instance_of?(::Redis::Cluster)
return Gitlab::Redis::CrossSlot::Pipeline.new(redis_instance).pipelined(&block)
end
if block
# Make sure that block is wrapped and executed only on the redis instance that is executing the block
redis_instance.send(command_name, *args, **kwargs) do |*params|
......
......@@ -40,7 +40,11 @@ def delete(*keys)
keys = keys.map { |key| cache_key(key) }
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis.unlink(*keys)
if Gitlab::Redis::ClusterUtil.cluster?(redis)
Gitlab::Redis::ClusterUtil.batch_unlink(keys, redis)
else
redis.unlink(*keys)
end
end
end
end
......
......@@ -22,14 +22,8 @@ def expire(*keys)
keys_to_expire = keys.map { |key| cache_key(key) }
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
if ::Feature.enabled?(:use_pipeline_over_multikey)
expired_count = 0
keys_to_expire.each_slice(1000) do |subset|
expired_count += redis.pipelined do |pipeline|
subset.each { |key| pipeline.unlink(key) }
end.sum
end
expired_count
if ::Feature.enabled?(:use_pipeline_over_multikey) || Gitlab::Redis::ClusterUtil.cluster?(redis)
Gitlab::Redis::ClusterUtil.batch_unlink(keys_to_expire, redis)
else
redis.unlink(*keys_to_expire)
end
......
......@@ -21,7 +21,7 @@ namespace :cache do
)
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
redis.del(*keys) if keys.any?
Gitlab::Redis::ClusterUtil.batch_unlink(keys, redis) if keys.any?
end
break if cursor == REDIS_SCAN_START_STOP
......
......@@ -103,7 +103,7 @@ def read(key, subkey)
context 'when deleting over 1000 emails' do
it 'deletes in batches of 1000' do
Gitlab::Redis::Cache.with do |redis|
expect(redis).to receive(:pipelined).twice.and_call_original
expect(redis).to receive(:pipelined).at_least(2).and_call_original
end
described_class.delete_by_email(*(Array.new(1001) { |i| i }))
......
......@@ -35,13 +35,13 @@ def stub_storages(method, value)
# will be an extra SELECT command to choose the right database. We
# don't want to make the spec less precise, so we force that to
# happen (if needed) first, then clear the counts.
Gitlab::Redis::Cache.with { |redis| redis.info }
Gitlab::Redis::Sessions.with { |redis| redis.info }
RequestStore.clear!
stub_rails_env('staging') # to avoid raising CrossSlotError
Gitlab::Redis::Cache.with { |redis| redis.mset('cache-test', 321, 'cache-test-2', 321) }
Gitlab::Redis::Sessions.with { |redis| redis.mset('cache-test', 321, 'cache-test-2', 321) }
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Gitlab::Redis::Cache.with { |redis| redis.mget('cache-test', 'cache-test-2') }
Gitlab::Redis::Sessions.with { |redis| redis.mget('cache-test', 'cache-test-2') }
end
Gitlab::Redis::SharedState.with { |redis| redis.set('shared-state-test', 123) }
end
......@@ -56,13 +56,13 @@ def stub_storages(method, value)
redis_read_bytes: be >= 0,
redis_write_bytes: be >= 0,
# Cache results
redis_cache_calls: 2,
redis_cache_cross_slot_calls: 1,
redis_cache_allowed_cross_slot_calls: 1,
redis_cache_duration_s: be >= 0,
redis_cache_read_bytes: be >= 0,
redis_cache_write_bytes: be >= 0,
# Queues results
redis_sessions_calls: 2,
redis_sessions_cross_slot_calls: 1,
redis_sessions_allowed_cross_slot_calls: 1,
redis_sessions_duration_s: be >= 0,
redis_sessions_read_bytes: be >= 0,
redis_sessions_write_bytes: be >= 0,
# Shared state results
redis_shared_state_calls: 1,
......
......@@ -41,9 +41,9 @@
context 'when Redis calls are made' do
it 'adds Redis data and omits Gitaly data' do
stub_rails_env('staging') # to avoid raising CrossSlotError
Gitlab::Redis::Cache.with { |redis| redis.mset('test-cache', 123, 'test-cache2', 123) }
Gitlab::Redis::Sessions.with { |redis| redis.mset('test-cache', 123, 'test-cache2', 123) }
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Gitlab::Redis::Cache.with { |redis| redis.mget('cache-test', 'cache-test-2') }
Gitlab::Redis::Sessions.with { |redis| redis.mget('cache-test', 'cache-test-2') }
end
Gitlab::Redis::Queues.with { |redis| redis.set('test-queues', 321) }
......@@ -63,13 +63,13 @@
expect(payload[:redis_queues_read_bytes]).to be >= 0
expect(payload[:redis_queues_write_bytes]).to be >= 0
# Cache payload
expect(payload[:redis_cache_calls]).to eq(2)
expect(payload[:redis_cache_cross_slot_calls]).to eq(1)
expect(payload[:redis_cache_allowed_cross_slot_calls]).to eq(1)
expect(payload[:redis_cache_duration_s]).to be >= 0
expect(payload[:redis_cache_read_bytes]).to be >= 0
expect(payload[:redis_cache_write_bytes]).to be >= 0
# Sessions payload
expect(payload[:redis_sessions_calls]).to eq(2)
expect(payload[:redis_sessions_cross_slot_calls]).to eq(1)
expect(payload[:redis_sessions_allowed_cross_slot_calls]).to eq(1)
expect(payload[:redis_sessions_duration_s]).to be >= 0
expect(payload[:redis_sessions_read_bytes]).to be >= 0
expect(payload[:redis_sessions_write_bytes]).to be >= 0
# Gitaly
expect(payload[:gitaly_calls]).to be_nil
......
......@@ -16,7 +16,11 @@
describe '#read_multi_mget' do
it 'runs multi-key command if no cross-slot command is expected' do
Rails.cache.redis.with do |redis|
expect(redis).not_to receive(:pipelined)
if Gitlab::Redis::ClusterUtil.cluster?(redis)
expect(redis).to receive(:pipelined).once.and_call_original
else
expect(redis).not_to receive(:pipelined)
end
end
expect(
......@@ -24,13 +28,15 @@
).to eq({ '{user1}:x' => 1, '{user1}:y' => 2, '{user1}:z' => 3 })
end
it 'skips patch if input is above 100' do
Rails.cache.redis.with do |redis|
expect(redis).not_to receive(:pipelined)
end
context 'when deleting large amount of keys' do
it 'batches get into pipelines of 100' do
Rails.cache.redis.with do |redis|
expect(redis).to receive(:pipelined).at_least(2).and_call_original
end
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Rails.cache.read_multi(*Array.new(101) { |i| i })
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
Rails.cache.read_multi(*Array.new(101) { |i| i })
end
end
end
......@@ -38,19 +44,23 @@
it 'reads multiple keys' do
if patched
Rails.cache.redis.with do |redis|
expect(redis).to receive(:pipelined).once.and_call_original
expect(redis).to receive(:pipelined).at_least(1).and_call_original
end
end
expect(::Feature).to receive(:enabled?)
.with(:feature_flag_state_logs, { default_enabled_if_undefined: nil, type: :ops })
.exactly(:once)
.and_call_original
expect(::Feature).to receive(:enabled?)
.with(:enable_rails_cache_pipeline_patch)
.exactly(:once)
.and_call_original
Gitlab::Redis::Cache.with do |redis|
unless Gitlab::Redis::ClusterUtil.cluster?(redis)
expect(::Feature).to receive(:enabled?)
.with(:feature_flag_state_logs, { default_enabled_if_undefined: nil, type: :ops })
.exactly(:once)
.and_call_original
expect(::Feature).to receive(:enabled?)
.with(:enable_rails_cache_pipeline_patch)
.exactly(:once)
.and_call_original
end
end
expect(
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
......@@ -65,7 +75,7 @@
shared_examples 'reading using non redis cache stores' do |klass|
it 'does not affect non Redis::Cache cache stores' do
klass.cache_store.redis.with do |redis|
expect(redis).not_to receive(:pipelined)
expect(redis).not_to receive(:pipelined) unless Gitlab::Redis::ClusterUtil.cluster?(redis)
end
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
......@@ -75,8 +85,8 @@
end
context 'when reading from non redis-cache stores' do
it_behaves_like 'reading using non redis cache stores', Gitlab::Redis::RepositoryCache
it_behaves_like 'reading using non redis cache stores', Gitlab::Redis::FeatureFlag
it_behaves_like 'reading using non redis cache stores', Gitlab::Redis::RepositoryCache
end
context 'when feature flag is disabled' do
......@@ -95,19 +105,23 @@
it 'deletes multiple keys' do
if patched
Rails.cache.redis.with do |redis|
expect(redis).to receive(:pipelined).once.and_call_original
expect(redis).to receive(:pipelined).at_least(1).and_call_original
end
end
expect(::Feature).to receive(:enabled?)
.with(:feature_flag_state_logs, { default_enabled_if_undefined: nil, type: :ops })
.exactly(:once)
.and_call_original
expect(::Feature).to receive(:enabled?)
.with(:enable_rails_cache_pipeline_patch)
.exactly(:once)
.and_call_original
Gitlab::Redis::Cache.with do |redis|
unless Gitlab::Redis::ClusterUtil.cluster?(redis)
expect(::Feature).to receive(:enabled?)
.with(:feature_flag_state_logs, { default_enabled_if_undefined: nil, type: :ops })
.exactly(:once)
.and_call_original
expect(::Feature).to receive(:enabled?)
.with(:enable_rails_cache_pipeline_patch)
.exactly(:once)
.and_call_original
end
end
expect(
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
......@@ -120,7 +134,7 @@
shared_examples 'deleting using non redis cache stores' do |klass|
it 'does not affect non Redis::Cache cache stores' do
klass.cache_store.redis.with do |redis|
expect(redis).not_to receive(:pipelined)
expect(redis).not_to receive(:pipelined) unless Gitlab::Redis::ClusterUtil.cluster?(redis)
end
Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
......@@ -130,8 +144,8 @@
end
context 'when deleting from non redis-cache stores' do
it_behaves_like 'deleting using non redis cache stores', Gitlab::Redis::RepositoryCache
it_behaves_like 'deleting using non redis cache stores', Gitlab::Redis::FeatureFlag
it_behaves_like 'deleting using non redis cache stores', Gitlab::Redis::RepositoryCache
end
context 'when deleting large amount of keys' do
......@@ -141,7 +155,9 @@
it 'calls pipeline multiple times' do
Rails.cache.redis.with do |redis|
expect(redis).to receive(:pipelined).twice.and_call_original
# no exact expectation on the number of calls as it could vary with the cluster size
# if the Redis is a Redis Cluster
expect(redis).to receive(:pipelined).at_least(2).and_call_original
end
expect(
......@@ -154,7 +170,11 @@
it 'runs multi-key command if no cross-slot command is expected' do
Rails.cache.redis.with do |redis|
expect(redis).not_to receive(:pipelined)
if Gitlab::Redis::ClusterUtil.cluster?(redis)
expect(redis).to receive(:pipelined).once.and_call_original
else
expect(redis).not_to receive(:pipelined)
end
end
expect(
......
......@@ -75,7 +75,7 @@
it 'sends multiple pipelines of 1000 unlinks' do
Gitlab::Redis::Cache.with do |redis|
expect(redis).to receive(:pipelined).twice.and_call_original
expect(redis).to receive(:pipelined).at_least(2).and_call_original
end
cache.clear_cache!(cache_prefix)
......