Verified commit 4df8cc87, authored by Krasimir Angelov, committed by GitLab

Merge branch 'cursor-bbm-testing' into 'master'

Add cursor batched background migration testing

See merge request !172446



Merged-by: Krasimir Angelov <kangelov@gitlab.com>
Approved-by: Matt Kasa <mkasa@gitlab.com>
Reviewed-by: Krasimir Angelov <kangelov@gitlab.com>
Co-authored-by: Simon Tomlinson <stomlinson@gitlab.com>
Parents: a3fd684f 1a75e240
Pipeline #1555558155 passed
@@ -19,6 +19,10 @@ def run_job(job)
     raise NotImplementedError, 'subclass must implement'
   end
 
+  def print_job_progress(batch_name, job)
+    # Subclasses can implement to print job progress
+  end
+
   def run_jobs(for_duration:)
     jobs_to_run = jobs_by_migration_name
     return if jobs_to_run.empty?
@@ -36,6 +40,7 @@ def run_jobs(for_duration:)
   private
 
   def run_jobs_for_migration(migration_name:, jobs:, run_until:)
+    puts("Sampling jobs for #{migration_name}") # rubocop:disable Rails/Output -- This runs only in pipelines and should output to the pipeline log
     per_background_migration_result_dir = File.join(@result_dir, migration_name)
 
     instrumentation = Instrumentation.new(result_dir: per_background_migration_result_dir,
@@ -46,10 +51,14 @@ def run_jobs_for_migration(migration_name:, jobs:, run_until:)
     jobs.each do |j|
       break if run_until <= Time.current
 
+      batch_name = batch_names.next
+      print_job_progress(batch_name, j)
+
       meta = { job_meta: job_meta(j) }
 
       instrumentation.observe(version: nil,
-                              name: batch_names.next,
+                              name: batch_name,
                               connection: connection,
                               meta: meta) do
         run_job(j)
......
@@ -14,6 +14,9 @@ def initialize(result_dir:, connection:, from_id:)
     @from_id = from_id
   end
 
+  # rubocop:disable Metrics/AbcSize -- This method is temporarily more complex while it deals with both cursor
+  # and non-cursor migrations. The complexity will significantly decrease when non-cursor migration support is
+  # removed.
   def jobs_by_migration_name
     set_shared_model_connection do
       Gitlab::Database::BackgroundMigration::BatchedMigration
@@ -22,73 +25,112 @@ def jobs_by_migration_name
         .to_h do |migration|
           batching_strategy = migration.batch_class.new(connection: connection)
-          smallest_batch_start = migration.next_min_value
-
-          table_max_value = define_batchable_model(migration.table_name, connection: connection)
-                              .maximum(migration.column_name)
-
-          largest_batch_start = [table_max_value - migration.batch_size, smallest_batch_start].max
-
-          # variance is the portion of the batch range that we shrink between variance * 0 and variance * 1
-          # to pick actual batches to sample.
-          variance = largest_batch_start - smallest_batch_start
-
-          batch_starts = uniform_fractions
-                           .lazy # frac varies from 0 to 1, values in smallest_batch_start..largest_batch_start
-                           .map { |frac| (variance * frac).to_i + smallest_batch_start }
-
-          # Track previously run batches so that we stop sampling if a new batch would intersect an older one
-          completed_batches = []
-
-          jobs_to_sample = batch_starts
-                           # Stop sampling if a batch would intersect a previous batch
-                           .take_while { |start| completed_batches.none? { |batch| batch.cover?(start) } }
-                           .map do |batch_start|
-            # The current block is lazily evaluated as part of the jobs_to_sample enumerable
-            # so it executes after the enclosing using_connection block has already executed
-            # Therefore we need to re-associate with the explicit connection again
-            Gitlab::Database::SharedModel.using_connection(connection) do
-              next_bounds = batching_strategy.next_batch(
-                migration.table_name,
-                migration.column_name,
-                batch_min_value: batch_start,
-                batch_size: migration.batch_size,
-                job_class: migration.job_class,
-                job_arguments: migration.job_arguments
-              )
-
-              # If no rows match, the next_bounds are nil.
-              # This will only happen if there are zero rows to match from the current sampling point to the end
-              # of the table
-              # Simulate the approach in the actual background migration worker by not sampling a batch
-              # from this range.
-              # (The actual worker would finish the migration, but we may find batches that can be sampled elsewhere
-              # in the table)
-              if next_bounds.nil?
-                # If the migration has no work to do across the entire table, sampling can get stuck
-                # in a loop if we don't mark the attempted batches as completed
-                completed_batches << (batch_start..(batch_start + migration.batch_size))
-                next
-              end
-
-              batch_min, batch_max = next_bounds
-
-              job = migration.create_batched_job!(batch_min, batch_max)
-
-              completed_batches << (batch_min..batch_max)
-
-              job
-            end
-          end.reject(&:nil?) # Remove skipped batches from the lazy list of batches to test
+          is_cursor = migration.cursor?
+          # Pretend every migration is a cursor migration. When actually running the job,
+          # we can unwrap the cursor if it is not.
+          cursor_columns = is_cursor ? migration.job_class.cursor_columns : [migration.column_name]
+
+          # Wrap the single result into an array (that we pretend is a cursor) if this
+          # is not a cursor migration. (next_min_value has an if check on cursor? and returns either array or int)
+          table_min_cursor = Array.wrap(migration.next_min_value)
+
+          ordering = cursor_columns.map { |c| { c => :desc } }
+          rows_ordered_backwards = define_batchable_model(migration.table_name, connection: connection)
+                                     .order(*ordering)
+          # If only one column, pluck.first returns a single value for that column instead of an array of
+          # all (1) column(s)
+          # So wrap the result for consistency between 1 and many columns
+          table_max_cursor = Array.wrap(rows_ordered_backwards.pick(*cursor_columns))
+
+          # variance is the portion of the batch range that we shrink between variance * 0 and variance * 1
+          # to pick actual batches to sample.
+          # Here we're going to do something that is explicitly WRONG, but good enough - we assume that we can
+          # just scale the first element of the cursor to get a reasonable percentage of the way through the table.
+          # This is really not true at all, but it's close enough for testing.
+          # For the rest of the components of our example cursors, we'll reuse parts of the end cursors for each
+          # batch for the start cursors of the next batch
+          variance = table_max_cursor[0] - table_min_cursor[0]
+
+          batch_first_elems = uniform_fractions.lazy.map { |frac| (variance * frac).to_i }
+
+          jobs_to_sample = Enumerator.new do |y|
+            completed_batches = []
+            # We construct the starting cursor from the end of the prev loop,
+            # or just the beginning of the table on the first loop
+            # This way, cursors for our batches start at interesting places in all of their positions
+            prev_end_cursor = table_min_cursor
+            loop do
+              first_elem = batch_first_elems.next
+              batch_start = [first_elem] + prev_end_cursor[1..]
+
+              break if completed_batches.any? { |batch| batch.cover?(batch_start) }
+
+              # The current block is lazily evaluated as part of the jobs_to_sample enumerable
+              # so it executes after the enclosing using_connection block has already executed
+              # Therefore we need to re-associate with the explicit connection again
+              Gitlab::Database::SharedModel.using_connection(connection) do
+                next_bounds = batching_strategy.next_batch(
+                  migration.table_name,
+                  migration.column_name,
+                  batch_min_value: is_cursor ? batch_start : batch_start[0],
+                  batch_size: migration.batch_size,
+                  job_class: migration.job_class,
+                  job_arguments: migration.job_arguments
+                )
+
+                # If no rows match, the next_bounds are nil.
+                # This will only happen if there are zero rows to match from the current sampling point to the end
+                # of the table
+                # Simulate the approach in the actual background migration worker by not sampling a batch
+                # from this range.
+                # (The actual worker would finish the migration, but we may find batches that can be sampled
+                # elsewhere in the table)
+                if next_bounds.nil?
+                  # If the migration has no work to do across the entire table, sampling can get stuck
+                  # in a loop if we don't mark the attempted batches as completed
+                  # We need to guess a size for this. The batch size of the migration is way too big in all
+                  # cases with a 2-element or more cursor, but it doesn't really matter so we just guess that.
+                  synthetic_cursor_offset = migration.batch_size
+                  batch_end = batch_start.dup
+                  batch_end[0] += synthetic_cursor_offset
+                  completed_batches << (batch_start..batch_end)
+                  next
+                end
+
+                batch_min, batch_max = next_bounds
+
+                # These are ints if not a cursor, wrap them to maintain the illusion that everything is a cursor
+                job = migration.create_batched_job!(batch_min, batch_max)
+
+                # Wrap the batch min/max back as cursors if the migration was not cursor-based
+                batch_min = Array.wrap(batch_min)
+                batch_max = Array.wrap(batch_max)
+
+                # Save the max as cursor details for the next loop so that we test
+                # interesting cursor positions.
+                prev_end_cursor = batch_max
+
+                completed_batches << (batch_min..batch_max)
+
+                y << job
+              end
+            end
+          end
 
           job_class_name = migration.job_class_name
 
-          export_migration_details(job_class_name, migration.slice(:interval, :total_tuple_count, :max_batch_size))
+          export_migration_details(job_class_name,
+            migration.slice(:interval, :total_tuple_count, :max_batch_size))
 
           [job_class_name, jobs_to_sample]
         end
     end
   end
+  # rubocop:enable Metrics/AbcSize
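
To make the start-cursor construction above concrete, here is a toy walkthrough using the same variable names as jobs_by_migration_name (all values are invented for illustration):

  # Hypothetical bounds for a two-column cursor such as [id_a, id_b]:
  table_min_cursor = [0, 0]    # Array.wrap(migration.next_min_value)
  table_max_cursor = [100, 7]  # first row when ordered backwards over the cursor columns
  variance = table_max_cursor[0] - table_min_cursor[0] # => 100

  # The first element is scaled through the table by the sampled fraction;
  # the trailing elements are reused from the previous batch's end cursor.
  prev_end_cursor = [37, 4]
  first_elem = (variance * 0.5).to_i                # fraction 1/2 => 50
  batch_start = [first_elem] + prev_end_cursor[1..]
  # => [50, 4]

As the comments in the diff admit, scaling only the first cursor element is deliberately imprecise, but it spreads the sampled batches across the table well enough for timing purposes.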
 
   def run_job(job)
     set_shared_model_connection do
@@ -96,6 +138,16 @@ def run_job(job)
     end
   end
 
+  def print_job_progress(batch_name, job)
+    args_phrase = if job.batched_migration.cursor?
+                    "#{job.min_cursor} - #{job.max_cursor}"
+                  else
+                    "#{job.min_value} - #{job.max_value}"
+                  end
+
+    puts("  #{batch_name} (#{args_phrase})") # rubocop:disable Rails/Output -- This runs only in pipelines and should output to the pipeline log
+  end
+
   def uniform_fractions
     Enumerator.new do |y|
       # Generates equally distributed fractions between 0 and 1, with increasing detail as more are pulled from
......
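
The uniform_fractions enumerator is truncated above. A minimal sketch that matches the behaviour the spec below asserts (an assumption about the implementation, not a copy of it):

  def uniform_fractions
    Enumerator.new do |y|
      # Endpoints first, then midpoints at ever finer powers of two:
      # 0, 1, 1/2, 1/4, 3/4, 1/8, 3/8, 5/8, 7/8, ...
      y << 0
      y << 1

      denominator = 2
      loop do
        numerator = 1
        while numerator < denominator
          y << numerator.fdiv(denominator) # odd numerators only; even ones already appeared
          numerator += 2
        end
        denominator *= 2
      end
    end
  end

  uniform_fractions.take(9)
  # => [0, 1, 0.5, 0.25, 0.75, 0.125, 0.375, 0.625, 0.875]

Because every denominator is a power of two, all of these values are exact in floating point, which is why the spec can compare them with eq.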
@@ -32,7 +32,37 @@ def queue_migration(
         sub_batch_size: sub_batch_size,
         status_event: :execute,
         max_batch_size: nil,
-        gitlab_schema: gitlab_schema
+        gitlab_schema: gitlab_schema,
+        pause_ms: 0
       )
     end
   end
 
+  def queue_cursor_migration(
+    job_class_name,
+    batch_table_name,
+    batch_column_name,
+    min_cursor:,
+    max_cursor:,
+    batching_strategy:,
+    batch_size: Gitlab::Database::Migrations::BatchedBackgroundMigrationHelpers::BATCH_SIZE,
+    sub_batch_size: Gitlab::Database::Migrations::BatchedBackgroundMigrationHelpers::SUB_BATCH_SIZE
+  )
+    Gitlab::Database::SharedModel.using_connection(connection) do
+      Gitlab::Database::BackgroundMigration::BatchedMigration.create!(
+        gitlab_schema: gitlab_schema,
+        job_class_name: job_class_name,
+        job_arguments: [],
+        table_name: batch_table_name,
+        column_name: batch_column_name,
+        min_cursor: min_cursor,
+        max_cursor: max_cursor,
+        batch_class_name: batching_strategy,
+        batch_size: batch_size,
+        sub_batch_size: sub_batch_size,
+        pause_ms: 0,
+        interval: 5.minutes,
+        status_event: :execute
+      )
+    end
+  end
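
For reference, a call to this helper looks like the one in the cursor spec further down (the values here are copied from that spec; min_cursor and max_cursor are arrays because the batching cursor spans multiple columns):

  queue_cursor_migration(
    'TestCursorMigration',
    '_test_column_copying_cursor',
    :id_a,
    min_cursor: [0, 0],
    max_cursor: [100, 10],
    batching_strategy: 'PrimaryKeyBatchingStrategy',
    batch_size: 173,
    sub_batch_size: 37
  )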
@@ -47,214 +77,313 @@ def queue_migration(
   with_them do
     let(:result_dir) { Pathname.new(Dir.mktmpdir) }
     let(:connection) { base_model.connection }
-    let(:table_name) { "_test_column_copying" }
     let(:num_rows_in_table) { 1000 }
-    let(:from_id) { 0 }
 
     after do
       FileUtils.rm_rf(result_dir)
     end
 
-    before do
-      connection.execute(<<~SQL)
-        CREATE TABLE #{table_name} (
-          id bigint primary key not null,
-          data bigint default 0
-        );
-
-        insert into #{table_name} (id) select i from generate_series(1, #{num_rows_in_table}) g(i);
-      SQL
-    end
-
-    context 'running a real background migration' do
-      let(:interval) { 5.minutes }
-      let(:params) { { version: nil, connection: connection } }
-      let(:migration_name) { 'CopyColumnUsingBackgroundMigrationJob' }
-      let(:migration_file_path) { result_dir.join('CopyColumnUsingBackgroundMigrationJob', 'details.json') }
-      let(:json_file) { Gitlab::Json.parse(File.read(migration_file_path)) }
-      let(:expected_file_keys) { %w[interval total_tuple_count max_batch_size] }
-
-      before do
-        # job_interval is skipped when testing
-        queue_migration(migration_name, table_name, :id, :id, :data, batch_size: 100, job_interval: interval)
-      end
-
-      subject(:sample_migration) do
-        described_class.new(
-          result_dir: result_dir,
-          connection: connection,
-          from_id: from_id
-        ).run_jobs(for_duration: 1.minute)
-      end
-
-      it 'runs sampled jobs from the batched background migration' do
-        # Expect that running sampling for this migration processes some of the rows. Sampling doesn't run
-        # over every row in the table, so this does not completely migrate the table.
-        expect { subject }.to change {
-          define_batchable_model(table_name, connection: connection)
-            .where('id IS DISTINCT FROM data').count
-        }.by_at_most(-1)
-      end
-
-      it 'uses the correct params to instrument the background migration' do
-        expect_next_instance_of(Gitlab::Database::Migrations::Instrumentation) do |instrumentation|
-          expect(instrumentation).to receive(:observe).with(hash_including(params)).at_least(:once).and_call_original
-        end
-
-        subject
-      end
-
-      it 'uses the filtering clause from the migration' do
-        expect_next_instance_of(Gitlab::BackgroundMigration::BatchingStrategies::PrimaryKeyBatchingStrategy) do |s|
-          expect(s).to receive(:filter_batch).at_least(:once).and_call_original
-        end
-
-        subject
-      end
-
-      it 'exports migration details to a file' do
-        subject
-
-        expect(json_file.keys).to match_array(expected_file_keys)
-      end
-    end
-
-    context 'with jobs to run' do
-      let(:migration_name) { 'TestBackgroundMigration' }
-
-      it 'samples jobs' do
-        calls = []
-        define_background_migration(migration_name) do |*args|
-          calls << args
-        end
-
-        queue_migration(
-          migration_name,
-          table_name,
-          :id,
-          job_interval: 5.minutes,
-          batch_size: 100
-        )
-
-        described_class.new(
-          result_dir: result_dir,
-          connection: connection,
-          from_id: from_id
-        ).run_jobs(for_duration: 3.minutes)
-
-        expect(calls).not_to be_empty
-      end
-
-      it 'samples 1 job with a batch size higher than the table size' do
-        calls = []
-        define_background_migration(migration_name) do |*args|
-          travel 1.minute
-          calls << args
-        end
-
-        queue_migration(
-          migration_name,
-          table_name, :id,
-          job_interval: 5.minutes,
-          batch_size: num_rows_in_table * 2,
-          sub_batch_size: num_rows_in_table * 2
-        )
-
-        described_class.new(
-          result_dir: result_dir,
-          connection: connection,
-          from_id: from_id
-        ).run_jobs(for_duration: 3.minutes)
-
-        expect(calls.size).to eq(1)
-      end
-
-      it 'does not sample a job if there are zero rows to sample' do
-        calls = []
-        define_background_migration(migration_name, scoping: ->(relation) {
-          relation.none
-        }) do |*args|
-          calls << args
-        end
-
-        queue_migration(
-          migration_name,
-          table_name,
-          :id,
-          job_interval: 5.minutes,
-          batch_size: num_rows_in_table * 2,
-          sub_batch_size: num_rows_in_table * 2
-        )
-
-        described_class.new(
-          result_dir: result_dir,
-          connection: connection,
-          from_id: from_id
-        ).run_jobs(for_duration: 3.minutes)
-
-        expect(calls.count).to eq(0)
-      end
-
-      context 'with multiple jobs to run' do
-        let(:last_id) do
-          Gitlab::Database::SharedModel.using_connection(connection) do
-            Gitlab::Database::BackgroundMigration::BatchedMigration.maximum(:id)
-          end
-        end
-
-        it 'runs all pending jobs based on the last migration id' do
-          old_migration = define_background_migration(migration_name)
-          queue_migration(
-            migration_name,
-            table_name,
-            :id,
-            job_interval: 5.minutes,
-            batch_size: 100
-          )
-
-          last_id
-          new_migration = define_background_migration('NewMigration') { travel 1.second }
-          queue_migration(
-            'NewMigration',
-            table_name,
-            :id,
-            job_interval: 5.minutes,
-            batch_size: 10,
-            sub_batch_size: 5
-          )
-
-          other_new_migration = define_background_migration('NewMigration2') { travel 2.seconds }
-          queue_migration(
-            'NewMigration2',
-            table_name,
-            :id,
-            job_interval: 5.minutes,
-            batch_size: 10,
-            sub_batch_size: 5
-          )
-
-          expect_migration_runs(new_migration => 3, other_new_migration => 2, old_migration => 0) do
-            described_class.new(
-              result_dir: result_dir,
-              connection: connection,
-              from_id: last_id
-            ).run_jobs(for_duration: 5.seconds)
-          end
-        end
-      end
-    end
-
-    context 'choosing uniform batches to run' do
-      subject { described_class.new(result_dir: result_dir, connection: connection, from_id: from_id) }
-
-      describe '#uniform_fractions' do
-        it 'generates evenly distributed sequences of fractions' do
-          received = subject.uniform_fractions.take(9)
-          expected = [0, 1, 1.0 / 2, 1.0 / 4, 3.0 / 4, 1.0 / 8, 3.0 / 8, 5.0 / 8, 7.0 / 8]
-
-          # All the fraction numerators are small integers, and all denominators are powers of 2, so these
-          # fit perfectly into floating point numbers with zero loss of precision
-          expect(received).to eq(expected)
-        end
-      end
-    end
+    context 'not a cursor migration' do
+      let(:table_name) { "_test_column_copying_non_cursor" }
+      let(:from_id) { 0 }
+
+      before do
+        connection.execute(<<~SQL)
+          CREATE TABLE #{table_name} (
+            id bigint primary key not null,
+            data bigint default 0
+          );
+
+          insert into #{table_name} (id) select i from generate_series(1, #{num_rows_in_table}) g(i);
+        SQL
+      end
+
+      context 'running a real background migration' do
+        let(:interval) { 5.minutes }
+        let(:params) { { version: nil, connection: connection } }
+        let(:migration_name) { 'CopyColumnUsingBackgroundMigrationJob' }
+        let(:migration_file_path) { result_dir.join('CopyColumnUsingBackgroundMigrationJob', 'details.json') }
+        let(:json_file) { Gitlab::Json.parse(File.read(migration_file_path)) }
+        let(:expected_file_keys) { %w[interval total_tuple_count max_batch_size] }
+
+        before do
+          # job_interval is skipped when testing
+          queue_migration(migration_name, table_name, :id, :id, :data, batch_size: 100, job_interval: interval)
+        end
+
+        subject(:sample_migration) do
+          described_class.new(
+            result_dir: result_dir,
+            connection: connection,
+            from_id: from_id
+          ).run_jobs(for_duration: 1.minute)
+        end
+
+        it 'runs sampled jobs from the batched background migration' do
+          # Expect that running sampling for this migration processes some of the rows. Sampling doesn't run
+          # over every row in the table, so this does not completely migrate the table.
+          expect { subject }.to change {
+            define_batchable_model(table_name, connection: connection)
+              .where('id IS DISTINCT FROM data').count
+          }.by_at_most(-1)
+        end
+
+        it 'uses the correct params to instrument the background migration' do
+          expect_next_instance_of(Gitlab::Database::Migrations::Instrumentation) do |instrumentation|
+            expect(instrumentation).to receive(:observe).with(hash_including(params)).at_least(:once).and_call_original
+          end
+
+          subject
+        end
+
+        it 'uses the filtering clause from the migration' do
+          expect_next_instance_of(Gitlab::BackgroundMigration::BatchingStrategies::PrimaryKeyBatchingStrategy) do |s|
+            expect(s).to receive(:filter_batch).at_least(:once).and_call_original
+          end
+
+          subject
+        end
+
+        it 'exports migration details to a file' do
+          subject
+
+          expect(json_file.keys).to match_array(expected_file_keys)
+        end
+      end
+
+      context 'with jobs to run' do
+        let(:migration_name) { 'TestBackgroundMigration' }
+
+        it 'samples jobs' do
+          calls = []
+          define_background_migration(migration_name, with_base_class: true) do |*args|
+            calls << args
+          end
+
+          queue_migration(
+            migration_name,
+            table_name,
+            :id,
+            job_interval: 5.minutes,
+            batch_size: 100
+          )
+
+          described_class.new(
+            result_dir: result_dir,
+            connection: connection,
+            from_id: from_id
+          ).run_jobs(for_duration: 3.minutes)
+
+          expect(calls).not_to be_empty
+        end
+
+        it 'samples 1 job with a batch size higher than the table size' do
+          calls = []
+          define_background_migration(migration_name, with_base_class: true) do |*args|
+            travel 1.minute
+            calls << args
+          end
+
+          queue_migration(
+            migration_name,
+            table_name, :id,
+            job_interval: 5.minutes,
+            batch_size: num_rows_in_table * 2,
+            sub_batch_size: num_rows_in_table * 2
+          )
+
+          described_class.new(
+            result_dir: result_dir,
+            connection: connection,
+            from_id: from_id
+          ).run_jobs(for_duration: 3.minutes)
+
+          expect(calls.size).to eq(1)
+        end
+
+        it 'does not sample a job if there are zero rows to sample' do
+          calls = []
+          define_background_migration(migration_name, scoping: ->(relation) {
+            relation.none
+          }) do |*args|
+            calls << args
+          end
+
+          queue_migration(
+            migration_name,
+            table_name,
+            :id,
+            job_interval: 5.minutes,
+            batch_size: num_rows_in_table * 2,
+            sub_batch_size: num_rows_in_table * 2
+          )
+
+          described_class.new(
+            result_dir: result_dir,
+            connection: connection,
+            from_id: from_id
+          ).run_jobs(for_duration: 3.minutes)
+
+          expect(calls.count).to eq(0)
+        end
+
+        context 'with multiple jobs to run' do
+          let(:last_id) do
+            Gitlab::Database::SharedModel.using_connection(connection) do
+              Gitlab::Database::BackgroundMigration::BatchedMigration.maximum(:id)
+            end
+          end
+
+          it 'runs all pending jobs based on the last migration id', :aggregate_failures do
+            old_migration = define_background_migration(migration_name, with_base_class: true)
+            queue_migration(
+              migration_name,
+              table_name,
+              :id,
+              job_interval: 5.minutes,
+              batch_size: 100
+            )
+
+            last_id
+            new_migration = define_background_migration('NewMigration', with_base_class: true) { travel 1.second }
+            queue_migration(
+              'NewMigration',
+              table_name,
+              :id,
+              job_interval: 5.minutes,
+              batch_size: 10,
+              sub_batch_size: 5
+            )
+
+            other_new_migration = define_background_migration('NewMigration2', with_base_class: true) do
+              travel 2.seconds
+            end
+            queue_migration(
+              'NewMigration2',
+              table_name,
+              :id,
+              job_interval: 5.minutes,
+              batch_size: 10,
+              sub_batch_size: 5
+            )
+
+            expect_migration_runs(new_migration => 3, other_new_migration => 2, old_migration => 0) do
+              described_class.new(
+                result_dir: result_dir,
+                connection: connection,
+                from_id: last_id
+              ).run_jobs(for_duration: 5.seconds)
+            end
+          end
+        end
+      end
+
+      context 'choosing uniform batches to run' do
+        subject { described_class.new(result_dir: result_dir, connection: connection, from_id: from_id) }
+
+        describe '#uniform_fractions' do
+          it 'generates evenly distributed sequences of fractions' do
+            received = subject.uniform_fractions.take(9)
+            expected = [0, 1, 1.0 / 2, 1.0 / 4, 3.0 / 4, 1.0 / 8, 3.0 / 8, 5.0 / 8, 7.0 / 8]
+
+            # All the fraction numerators are small integers, and all denominators are powers of 2, so these
+            # fit perfectly into floating point numbers with zero loss of precision
+            expect(received).to eq(expected)
+          end
+        end
+      end
+    end
+
+    context 'a cursor migration' do
+      let(:table_name) { "_test_column_copying_cursor" }
+
+      let(:background_migration_job_class) do
+        define_background_migration('TestCursorMigration', with_base_class: true, block_context: :migration,
+          cursor_columns: [:id_a, :id_b]) do
+          each_sub_batch do |relation|
+            # Want to relation.update_all(backfilled: )
+            # But rails doesn't know what to use as the primary key when transforming that to
+            # UPDATE .. WHERE <pk> IN (subquery) because the primary key is composite
+            # So it generates invalid sql UPDATE ... WHERE <table_name>."" IN (subquery)
+            # Instead build our own
+            connection.execute(<<~SQL)
+              UPDATE #{batch_table}
+              SET data = data + 1
+              WHERE (id_a, id_b) IN (#{relation.select(:id_a, :id_b).to_sql})
+            SQL
+          end
+        end
+      end
+
+      let(:from_cursor) { [0, 0] }
+
+      before do
+        connection.execute(<<~SQL)
+          CREATE TABLE #{table_name} (
+            id_a bigint not null,
+            id_b bigint not null,
+            data bigint default 0,
+            primary key (id_a, id_b)
+          );
+
+          insert into #{table_name} (id_a, id_b) select i / 10, i % 10 from generate_series(1, #{num_rows_in_table}) g(i);
+        SQL
+
+        # job_interval is skipped when testing
+        queue_cursor_migration(
+          background_migration_job_class.name.demodulize,
+          table_name, :id_a,
+          min_cursor: [0, 0],
+          max_cursor: [100, 10],
+          batching_strategy: 'PrimaryKeyBatchingStrategy',
+          batch_size: 173,
+          sub_batch_size: 37
+        )
+      end
+
+      subject(:sample_migration) do
+        described_class.new(
+          result_dir: result_dir,
+          connection: connection,
+          from_id: 0
+        ).run_jobs(for_duration: 1.minute)
+      end
+
+      context 'running a real background migration' do
+        let(:migration_file_path) { result_dir.join(background_migration_job_class.name.demodulize, 'details.json') }
+        let(:json_file) { Gitlab::Json.parse(File.read(migration_file_path)) }
+        let(:params) { { version: nil, connection: connection } }
+        let(:expected_file_keys) { %w[interval total_tuple_count max_batch_size] }
+
+        it 'runs sampled jobs from the batched background migration' do
+          # Expect that running sampling for this migration processes some of the rows. Sampling doesn't run
+          # over every row in the table, so this does not completely migrate the table.
+          expect { subject }.to change {
+            define_batchable_model(table_name, connection: connection)
+              .where('data = 0').count
+          }.by_at_most(-1)
+        end
+
+        it 'uses the correct params to instrument the background migration' do
+          expect_next_instance_of(Gitlab::Database::Migrations::Instrumentation) do |instrumentation|
+            expect(instrumentation).to receive(:observe).with(hash_including(params)).at_least(:once).and_call_original
+          end
+
+          subject
+        end
+
+        it 'uses the filtering clause from the migration' do
+          pending("Cursor migrations do not support filtering clauses yet")
+
+          expect_next_instance_of(Gitlab::BackgroundMigration::BatchingStrategies::PrimaryKeyBatchingStrategy) do |s|
+            expect(s).to receive(:filter_batch).at_least(:once).and_call_original
+          end
+
+          subject
+        end
+
+        it 'exports migration details to a file' do
+          subject
+
+          expect(json_file.keys).to match_array(expected_file_keys)
+        end
+      end
+    end
   end
......
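
One detail of the cursor spec worth spelling out: the test job cannot use relation.update_all because the test table's primary key is composite, so when Rails rewrites the update into UPDATE ... WHERE <pk> IN (subquery) it has no single primary-key column and emits an empty column name. The hand-built statement keeps the tuple comparison explicit (restated from the spec above):

  # Invalid SQL Rails would generate for the composite primary key:
  #   UPDATE _test_column_copying_cursor SET data = data + 1
  #   WHERE _test_column_copying_cursor."" IN (SELECT ...)
  #
  # The equivalent the test job executes instead:
  connection.execute(<<~SQL)
    UPDATE #{batch_table}
    SET data = data + 1
    WHERE (id_a, id_b) IN (#{relation.select(:id_a, :id_b).to_sql})
  SQL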
@@ -2,15 +2,25 @@
 module Database
   module MigrationTestingHelpers
-    def define_background_migration(name, with_base_class: true, scoping: nil)
+    def define_background_migration(
+      name, with_base_class: true, scoping: nil, block_context: :test,
+      cursor_columns: nil, &block)
+      raise "block_context must be :test or :migration" unless [:test, :migration].include?(block_context)
+
       klass = Class.new(with_base_class ? Gitlab::BackgroundMigration::BatchedMigrationJob : Object) do
         operation_name :update if with_base_class
 
-        # Can't simply def perform here as we won't have access to the block,
-        # similarly can't define_method(:perform, &block) here as it would change the block receiver
-        define_method(:perform) { |*args| yield(*args) }
+        if block_context == :test
+          # Can't simply def perform here as we won't have access to the block,
+          # similarly can't define_method(:perform, &block) here as it would change the block receiver
+          define_method(:perform) { |*args| yield(*args) }
+        elsif block_context == :migration
+          define_method(:perform, &block)
+        end
 
         scope_to(scoping) if scoping
+        cursor(*cursor_columns) if cursor_columns
       end
 
       stub_const("Gitlab::BackgroundMigration::#{name}", klass)
......
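
A usage sketch for the extended helper (the job class name and block body are illustrative; block_context: :migration makes the block run as the job's own #perform, so BatchedMigrationJob helpers such as each_sub_batch are available on self, as in the cursor spec above):

  define_background_migration('TestCursorMigration', with_base_class: true,
    block_context: :migration, cursor_columns: [:id_a, :id_b]) do
    each_sub_batch do |relation|
      # work against each sub-batch relation here
    end
  end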