Verified commit 44449068, authored by Tianwen Chen, committed by GitLab
parent 4a0ea7dd
1 merge request: !146759 Swap columns upstream_pipeline_id for p_ci_builds
Showing 643 additions and 60 deletions
@@ -3161,7 +3161,6 @@ RSpec/FeatureCategory:
- 'spec/lib/gitlab/database/partitioning/single_numeric_list_partition_spec.rb'
- 'spec/lib/gitlab/database/partitioning/time_partition_spec.rb'
- 'spec/lib/gitlab/database/partitioning_migration_helpers/backfill_partitioned_table_spec.rb'
- 'spec/lib/gitlab/database/partitioning_migration_helpers/index_helpers_spec.rb'
- 'spec/lib/gitlab/database/pg_class_spec.rb'
- 'spec/lib/gitlab/database/postgres_constraint_spec.rb'
- 'spec/lib/gitlab/database/postgres_hll/batch_distinct_counter_spec.rb'
......
# frozen_string_literal: true
class SwapColumnsForUpstreamPipelineIdBetweenCiBuildsAndCiPipelines < Gitlab::Database::Migration[2.2]
include ::Gitlab::Database::MigrationHelpers::Swapping
include ::Gitlab::Database::PartitioningMigrationHelpers
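# The concurrent partitioned index/FK helpers used in #down cannot run inside
# a single DDL transaction, so locking is managed explicitly with
# with_lock_retries below.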
disable_ddl_transaction!
milestone '16.11'
TABLE = :p_ci_builds
REFERENCING_TABLE = :ci_pipelines
COLUMNS = [
{ name: :upstream_pipeline_id_convert_to_bigint, old_name: :upstream_pipeline_id },
{ name: :commit_id_convert_to_bigint, old_name: :commit_id }
]
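# Sync trigger function (installed by the earlier bigint-conversion
# migration) that mirrors writes between each integer column and its
# *_convert_to_bigint counterpart; it is reset after the swap.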
TRIGGER_FUNCTION = :trigger_10ee1357e825
FKS = [
{ name: :fk_6b6c3f3e70, column: :upstream_pipeline_id_convert_to_bigint, old_name: :fk_87f4cefcda },
{ name: :fk_8d588a7095, column: :commit_id_convert_to_bigint, old_name: :fk_d3130c9a7f }
]
INDEXES = [
{
name: :p_ci_builds_upstream_pipeline_id_bigint_idx,
columns: [:upstream_pipeline_id_convert_to_bigint],
options: { where: 'upstream_pipeline_id_convert_to_bigint IS NOT NULL' },
old_name: :p_ci_builds_upstream_pipeline_id_idx
},
{
name: :p_ci_builds_commit_id_bigint_artifacts_expire_at_id_idx,
columns: [:commit_id_convert_to_bigint, :artifacts_expire_at, :id],
options: {
where: "type::text = 'Ci::Build'::text AND (retried = false OR retried IS NULL) AND (name::text = ANY (ARRAY['sast'::character varying::text, 'secret_detection'::character varying::text, 'dependency_scanning'::character varying::text, 'container_scanning'::character varying::text, 'dast'::character varying::text]))" # rubocop:disable Layout/LineLength -- Where clause is just too long.
},
old_name: :p_ci_builds_commit_id_artifacts_expire_at_id_idx
},
{
name: :p_ci_builds_commit_id_bigint_stage_idx_created_at_idx,
columns: [:commit_id_convert_to_bigint, :stage_idx, :created_at],
old_name: :p_ci_builds_commit_id_stage_idx_created_at_idx
},
{
name: :p_ci_builds_commit_id_bigint_status_type_idx,
columns: [:commit_id_convert_to_bigint, :status, :type],
old_name: :p_ci_builds_commit_id_status_type_idx
},
{
name: :p_ci_builds_commit_id_bigint_type_name_ref_idx,
columns: [:commit_id_convert_to_bigint, :type, :name, :ref],
old_name: :p_ci_builds_commit_id_type_name_ref_idx
},
{
name: :p_ci_builds_commit_id_bigint_type_ref_idx,
columns: [:commit_id_convert_to_bigint, :type, :ref],
old_name: :p_ci_builds_commit_id_type_ref_idx
},
{
name: :p_ci_builds_resource_group_id_status_commit_id_bigint_idx,
columns: [:resource_group_id, :status, :commit_id_convert_to_bigint],
options: { where: 'resource_group_id IS NOT NULL' },
old_name: :p_ci_builds_resource_group_id_status_commit_id_idx
}
]
def up
# rubocop:disable Migration/WithLockRetriesDisallowedMethod -- custom implementation
with_lock_retries(raise_on_exhaustion: true) do
swap
remove_integer_indexes_and_foreign_keys_and_rename_bigint
end
# rubocop:enable Migration/WithLockRetriesDisallowedMethod
end
def down
recover_integer_indexes_and_foreign_keys
# rubocop:disable Migration/WithLockRetriesDisallowedMethod -- custom implementation
with_lock_retries(raise_on_exhaustion: true) do
swap
swap_indexes_and_foreign_keys
end
# rubocop:enable Migration/WithLockRetriesDisallowedMethod
end
private
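# Swap the integer/bigint column names and reset the function behind the
# sync trigger. Both tables are locked up front (ci_pipelines before
# p_ci_builds, matching the reverse_lock_order used for the FK operations)
# so concurrent writes cannot observe the columns mid-swap.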
def swap
lock_tables(REFERENCING_TABLE, TABLE)
COLUMNS.each do |column|
swap_columns(TABLE, column[:name], column[:old_name])
end
reset_trigger_function(TRIGGER_FUNCTION)
end
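# After the swap, the *_convert_to_bigint names point at the old integer
# columns: drop their foreign keys and indexes, then rename the bigint
# versions to the canonical names.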
def remove_integer_indexes_and_foreign_keys_and_rename_bigint
FKS.each do |fk_metadata|
remove_foreign_key_if_exists(TABLE, REFERENCING_TABLE, column: fk_metadata[:column], reverse_lock_order: true)
rename_partitioned_foreign_key(TABLE, fk_metadata[:name], fk_metadata[:old_name])
end
INDEXES.each do |index_metadata|
old_index_name = old_index_name_from(index_metadata)
if old_index_name.nil?
rename_partitioned_index(TABLE, index_metadata[:name], index_metadata[:old_name])
else
if old_index_name != index_metadata[:old_name]
# rename the index to the name we expect
execute "ALTER INDEX #{old_index_name} RENAME TO #{index_metadata[:old_name]}"
end
swap_partitioned_indexes(TABLE, index_metadata[:name], index_metadata[:old_name])
end
remove_index(TABLE, name: index_metadata[:name], if_exists: true) # rubocop:disable Migration/RemoveIndex -- same as remove_concurrent_partitioned_index_by_name
end
end
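# Rollback path: #recover_integer_indexes_and_foreign_keys re-creates the
# integer indexes/FKs under the bigint names, so once the columns are
# swapped back the two sets of names are exchanged again.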
def swap_indexes_and_foreign_keys
FKS.each do |fk_metadata|
swap_partitioned_foreign_keys(TABLE, fk_metadata[:name], fk_metadata[:old_name])
end
INDEXES.each do |index_metadata|
swap_partitioned_indexes(TABLE, index_metadata[:name], index_metadata[:old_name])
end
end
def recover_integer_indexes_and_foreign_keys
INDEXES.each do |index_metadata|
add_concurrent_partitioned_index(
TABLE, index_metadata[:columns],
name: index_metadata[:name], **index_metadata.fetch(:options, {})
)
end
FKS.each do |fk_metadata|
add_concurrent_partitioned_foreign_key(
TABLE, REFERENCING_TABLE,
column: fk_metadata[:column], name: fk_metadata[:name], on_delete: :cascade, reverse_lock_order: true
)
end
end
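# The old index may exist under an auto-generated name on some deployments;
# fall back to matching by column list when the expected old_name is absent.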
def old_index_name_from(index_metadata)
return index_metadata[:old_name] if index_name_exists?(TABLE, index_metadata[:old_name])
old_index_columns = index_metadata[:columns].map(&:to_s)
existing_old_index =
indexes(TABLE).find { |index| index.columns == old_index_columns }
existing_old_index.name.to_sym if existing_old_index.present?
end
end
3c94292cc29e491ed6d88ad7ea2c76308aa184d95278e7a34bc0b9a8d9e348b7
\ No newline at end of file
@@ -890,7 +890,7 @@ CREATE TABLE p_ci_builds (
started_at timestamp without time zone,
runner_id integer,
coverage double precision,
commit_id integer,
commit_id_convert_to_bigint integer,
name character varying,
options text,
allow_failure boolean DEFAULT false NOT NULL,
@@ -919,7 +919,7 @@ CREATE TABLE p_ci_builds (
failure_reason integer,
scheduled_at timestamp with time zone,
token_encrypted character varying,
upstream_pipeline_id integer,
upstream_pipeline_id_convert_to_bigint integer,
resource_group_id bigint,
waiting_for_resource_at timestamp with time zone,
processed boolean,
@@ -929,12 +929,12 @@ CREATE TABLE p_ci_builds (
partition_id bigint NOT NULL,
auto_canceled_by_partition_id bigint DEFAULT 100 NOT NULL,
auto_canceled_by_id bigint,
commit_id_convert_to_bigint bigint,
commit_id bigint,
erased_by_id_convert_to_bigint bigint,
project_id_convert_to_bigint bigint,
runner_id_convert_to_bigint bigint,
trigger_request_id_convert_to_bigint bigint,
upstream_pipeline_id_convert_to_bigint bigint,
upstream_pipeline_id bigint,
user_id_convert_to_bigint bigint,
CONSTRAINT check_1e2fbd1b39 CHECK ((lock_version IS NOT NULL))
)
@@ -5872,7 +5872,7 @@ CREATE TABLE ci_builds (
started_at timestamp without time zone,
runner_id integer,
coverage double precision,
commit_id integer,
commit_id_convert_to_bigint integer,
name character varying,
options text,
allow_failure boolean DEFAULT false NOT NULL,
@@ -5901,7 +5901,7 @@ CREATE TABLE ci_builds (
failure_reason integer,
scheduled_at timestamp with time zone,
token_encrypted character varying,
upstream_pipeline_id integer,
upstream_pipeline_id_convert_to_bigint integer,
resource_group_id bigint,
waiting_for_resource_at timestamp with time zone,
processed boolean,
@@ -5911,12 +5911,12 @@ CREATE TABLE ci_builds (
partition_id bigint NOT NULL,
auto_canceled_by_partition_id bigint DEFAULT 100 NOT NULL,
auto_canceled_by_id bigint,
commit_id_convert_to_bigint bigint,
commit_id bigint,
erased_by_id_convert_to_bigint bigint,
project_id_convert_to_bigint bigint,
runner_id_convert_to_bigint bigint,
trigger_request_id_convert_to_bigint bigint,
upstream_pipeline_id_convert_to_bigint bigint,
upstream_pipeline_id bigint,
user_id_convert_to_bigint bigint,
CONSTRAINT check_1e2fbd1b39 CHECK ((lock_version IS NOT NULL))
);
@@ -24056,18 +24056,6 @@ CREATE INDEX idx_vulnerability_reads_project_id_scanner_id_vulnerability_id ON v
 
CREATE UNIQUE INDEX idx_work_item_types_on_namespace_id_and_name_null_namespace ON work_item_types USING btree (btrim(lower(name)), ((namespace_id IS NULL))) WHERE (namespace_id IS NULL);
 
CREATE INDEX p_ci_builds_commit_id_bigint_artifacts_expire_at_id_idx ON ONLY p_ci_builds USING btree (commit_id_convert_to_bigint, artifacts_expire_at, id) WHERE (((type)::text = 'Ci::Build'::text) AND ((retried = false) OR (retried IS NULL)) AND ((name)::text = ANY (ARRAY[('sast'::character varying)::text, ('secret_detection'::character varying)::text, ('dependency_scanning'::character varying)::text, ('container_scanning'::character varying)::text, ('dast'::character varying)::text])));
CREATE INDEX index_357cc39ca4 ON ci_builds USING btree (commit_id_convert_to_bigint, artifacts_expire_at, id) WHERE (((type)::text = 'Ci::Build'::text) AND ((retried = false) OR (retried IS NULL)) AND ((name)::text = ANY (ARRAY[('sast'::character varying)::text, ('secret_detection'::character varying)::text, ('dependency_scanning'::character varying)::text, ('container_scanning'::character varying)::text, ('dast'::character varying)::text])));
CREATE INDEX p_ci_builds_upstream_pipeline_id_bigint_idx ON ONLY p_ci_builds USING btree (upstream_pipeline_id_convert_to_bigint) WHERE (upstream_pipeline_id_convert_to_bigint IS NOT NULL);
CREATE INDEX index_89477d6012 ON ci_builds USING btree (upstream_pipeline_id_convert_to_bigint) WHERE (upstream_pipeline_id_convert_to_bigint IS NOT NULL);
CREATE INDEX p_ci_builds_commit_id_bigint_status_type_idx ON ONLY p_ci_builds USING btree (commit_id_convert_to_bigint, status, type);
CREATE INDEX index_8c07a79c70 ON ci_builds USING btree (commit_id_convert_to_bigint, status, type);
CREATE INDEX index_abuse_events_on_abuse_report_id ON abuse_events USING btree (abuse_report_id);
 
CREATE INDEX index_abuse_events_on_category_and_source ON abuse_events USING btree (category, source);
@@ -24304,10 +24292,6 @@ CREATE INDEX index_batched_jobs_on_batched_migration_id_and_status ON batched_ba
 
CREATE UNIQUE INDEX index_batched_migrations_on_gl_schema_and_unique_configuration ON batched_background_migrations USING btree (gitlab_schema, job_class_name, table_name, column_name, job_arguments);
 
CREATE INDEX p_ci_builds_resource_group_id_status_commit_id_bigint_idx ON ONLY p_ci_builds USING btree (resource_group_id, status, commit_id_convert_to_bigint) WHERE (resource_group_id IS NOT NULL);
CREATE INDEX index_bc23fb9243 ON ci_builds USING btree (resource_group_id, status, commit_id_convert_to_bigint) WHERE (resource_group_id IS NOT NULL);
CREATE INDEX index_board_assignees_on_assignee_id ON board_assignees USING btree (assignee_id);
 
CREATE UNIQUE INDEX index_board_assignees_on_board_id_and_assignee_id ON board_assignees USING btree (board_id, assignee_id);
@@ -24986,10 +24970,6 @@ CREATE UNIQUE INDEX index_customer_relations_contacts_on_unique_email_per_group
 
CREATE UNIQUE INDEX index_cycle_analytics_stage_event_hashes_on_hash_sha_256 ON analytics_cycle_analytics_stage_event_hashes USING btree (hash_sha256);
 
CREATE INDEX p_ci_builds_commit_id_bigint_stage_idx_created_at_idx ON ONLY p_ci_builds USING btree (commit_id_convert_to_bigint, stage_idx, created_at);
CREATE INDEX index_d46de3aa4f ON ci_builds USING btree (commit_id_convert_to_bigint, stage_idx, created_at);
CREATE UNIQUE INDEX index_daily_build_group_report_results_unique_columns ON ci_daily_build_group_report_results USING btree (project_id, ref_path, date, group_name);
 
CREATE UNIQUE INDEX index_dast_pre_scan_verifications_on_ci_pipeline_id ON dast_pre_scan_verifications USING btree (ci_pipeline_id);
@@ -25328,14 +25308,6 @@ CREATE UNIQUE INDEX index_external_audit_event_destinations_on_namespace_id ON a
 
CREATE UNIQUE INDEX index_external_pull_requests_on_project_and_branches ON external_pull_requests USING btree (project_id, source_branch, target_branch);
 
CREATE INDEX p_ci_builds_commit_id_bigint_type_ref_idx ON ONLY p_ci_builds USING btree (commit_id_convert_to_bigint, type, ref);
CREATE INDEX index_fc42f73fa6 ON ci_builds USING btree (commit_id_convert_to_bigint, type, ref);
CREATE INDEX p_ci_builds_commit_id_bigint_type_name_ref_idx ON ONLY p_ci_builds USING btree (commit_id_convert_to_bigint, type, name, ref);
CREATE INDEX index_feafb4d370 ON ci_builds USING btree (commit_id_convert_to_bigint, type, name, ref);
CREATE UNIQUE INDEX index_feature_flag_scopes_on_flag_id_and_environment_scope ON operations_feature_flag_scopes USING btree (feature_flag_id, environment_scope);
 
CREATE UNIQUE INDEX index_feature_flags_clients_on_project_id_and_token_encrypted ON operations_feature_flags_clients USING btree (project_id, token_encrypted);
@@ -29428,16 +29400,8 @@ ALTER INDEX p_ci_stages_pkey ATTACH PARTITION ci_stages_pkey;
 
ALTER INDEX p_ci_job_artifacts_job_id_file_type_partition_id_idx ATTACH PARTITION idx_ci_job_artifacts_on_job_id_file_type_and_partition_id_uniq;
 
ALTER INDEX p_ci_builds_commit_id_bigint_artifacts_expire_at_id_idx ATTACH PARTITION index_357cc39ca4;
ALTER INDEX p_ci_builds_upstream_pipeline_id_bigint_idx ATTACH PARTITION index_89477d6012;
ALTER INDEX p_ci_builds_commit_id_bigint_status_type_idx ATTACH PARTITION index_8c07a79c70;
ALTER INDEX p_ci_builds_runner_id_bigint_id_idx ATTACH PARTITION index_adafd086ad;
 
ALTER INDEX p_ci_builds_resource_group_id_status_commit_id_bigint_idx ATTACH PARTITION index_bc23fb9243;
ALTER INDEX p_ci_builds_metadata_build_id_idx ATTACH PARTITION index_ci_builds_metadata_on_build_id_and_has_exposed_artifacts;
 
ALTER INDEX p_ci_builds_metadata_build_id_id_idx ATTACH PARTITION index_ci_builds_metadata_on_build_id_and_id_and_interruptible;
@@ -29518,12 +29482,6 @@ ALTER INDEX p_ci_stages_pipeline_id_name_partition_id_idx ATTACH PARTITION index
 
ALTER INDEX p_ci_stages_project_id_idx ATTACH PARTITION index_ci_stages_on_project_id;
 
ALTER INDEX p_ci_builds_commit_id_bigint_stage_idx_created_at_idx ATTACH PARTITION index_d46de3aa4f;
ALTER INDEX p_ci_builds_commit_id_bigint_type_ref_idx ATTACH PARTITION index_fc42f73fa6;
ALTER INDEX p_ci_builds_commit_id_bigint_type_name_ref_idx ATTACH PARTITION index_feafb4d370;
ALTER INDEX p_ci_builds_user_id_name_idx ATTACH PARTITION index_partial_ci_builds_on_user_id_name_parser_features;
 
ALTER INDEX p_ci_pipeline_variables_pipeline_id_key_partition_id_idx ATTACH PARTITION index_pipeline_variables_on_pipeline_id_key_partition_id_unique;
@@ -30080,9 +30038,6 @@ ALTER TABLE p_ci_builds
ALTER TABLE ONLY merge_requests
ADD CONSTRAINT fk_6a5165a692 FOREIGN KEY (milestone_id) REFERENCES milestones(id) ON DELETE SET NULL;
 
ALTER TABLE p_ci_builds
ADD CONSTRAINT fk_6b6c3f3e70 FOREIGN KEY (upstream_pipeline_id_convert_to_bigint) REFERENCES ci_pipelines(id) ON DELETE CASCADE;
ALTER TABLE ONLY ai_agent_versions
ADD CONSTRAINT fk_6c2f682587 FOREIGN KEY (agent_id) REFERENCES ai_agents(id) ON DELETE CASCADE;
 
@@ -30245,9 +30200,6 @@ ALTER TABLE ONLY work_item_dates_sources
ALTER TABLE ONLY bulk_import_exports
ADD CONSTRAINT fk_8c6f33cebe FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
 
ALTER TABLE p_ci_builds
ADD CONSTRAINT fk_8d588a7095 FOREIGN KEY (commit_id_convert_to_bigint) REFERENCES ci_pipelines(id) ON DELETE CASCADE;
ALTER TABLE ONLY raw_usage_data
ADD CONSTRAINT fk_8e21125854 FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
 
@@ -6,6 +6,7 @@ module PartitioningMigrationHelpers
module ForeignKeyHelpers
include ::Gitlab::Database::SchemaHelpers
include ::Gitlab::Database::Migrations::LockRetriesHelpers
include ::Gitlab::Database::MigrationHelpers::Swapping
ERROR_SCOPE = 'foreign keys'
@@ -106,6 +107,34 @@ def validate_partitioned_foreign_key(source, column, name: nil)
end
end
# Rename the foreign key for a partitioned table and its partitions.
#
# Example:
#
# rename_partitioned_foreign_key :users, 'existing_partitioned_fk_name', 'new_fk_name'
def rename_partitioned_foreign_key(table_name, old_foreign_key, new_foreign_key)
partitioned_table = find_partitioned_table(table_name)
partitioned_table.postgres_partitions.order(:name).each do |partition|
rename_constraint(partition.identifier, old_foreign_key, new_foreign_key)
end
rename_constraint(partitioned_table.name, old_foreign_key, new_foreign_key)
end
# Swap the foreign key names for a partitioned table and its partitions.
#
# Example:
#
# swap_partitioned_foreign_keys :users, 'existing_partitioned_fk_name_1', 'existing_partitioned_fk_name_2'
def swap_partitioned_foreign_keys(table_name, old_foreign_key, new_foreign_key)
partitioned_table = find_partitioned_table(table_name)
partitioned_table.postgres_partitions.order(:name).each do |partition|
swap_foreign_keys(partition.identifier, old_foreign_key, new_foreign_key)
end
swap_foreign_keys(partitioned_table.name, old_foreign_key, new_foreign_key)
end
private
# Returns the name for a concurrent partitioned foreign key.
......
@@ -6,6 +6,7 @@ module PartitioningMigrationHelpers
module IndexHelpers
include Gitlab::Database::MigrationHelpers
include Gitlab::Database::SchemaHelpers
include Gitlab::Database::MigrationHelpers::Swapping
DuplicatedIndexesError = Class.new(StandardError)
@@ -79,6 +80,52 @@ def remove_concurrent_partitioned_index_by_name(table_name, index_name)
end
end
# Rename the index for a partitioned table and its partitions.
# The `new_index_name` will be the new name of the partitioned index.
# The new names of the partition indexes are generated from the partition
# table name and `new_index_name`, e.g. `index_000925dbd7`.
#
# Example:
#
# rename_partitioned_index :users, 'existing_partitioned_index_name', 'new_index_name'
def rename_partitioned_index(table_name, old_index_name, new_index_name)
partitioned_table = find_partitioned_table(table_name)
old_index_json = index_json(find_index(partitioned_table.name) { |i| i.name == old_index_name.to_s })
partitioned_table.postgres_partitions.order(:name).each do |partition|
old_partition_index_name = find_index(partition.identifier) { |i| index_json(i) == old_index_json }.name
new_partition_index_name = generated_index_name(partition.identifier, new_index_name)
rename_index_with_schema(
partition.identifier, old_partition_index_name, new_partition_index_name, schema: partition.schema
)
end
rename_index_with_schema(partitioned_table.name, old_index_name, new_index_name)
end
# Swap the index names for a partitioned table and its partitions.
#
# Example:
#
# swap_partitioned_indexes :users, 'existing_partitioned_index_name_1', 'existing_partitioned_index_name_2'
def swap_partitioned_indexes(table_name, old_index_name, new_index_name)
partitioned_table = find_partitioned_table(table_name)
old_index_json = index_json(find_index(partitioned_table.name) { |i| i.name == old_index_name.to_s })
new_index_json = index_json(find_index(partitioned_table.name) { |i| i.name == new_index_name.to_s })
partitioned_table.postgres_partitions.order(:name).each do |partition|
old_partition_index_name = find_index(partition.identifier) { |i| index_json(i) == old_index_json }.name
new_partition_index_name = find_index(partition.identifier) { |i| index_json(i) == new_index_json }.name
swap_indexes(
partition.identifier, old_partition_index_name, new_partition_index_name, schema: partition.schema
)
end
swap_indexes(partitioned_table.name, old_index_name, new_index_name)
end
# Finds duplicate indexes for a given schema and table. This finds
# indexes where the index definition is identical but the names are
# different. Returns an array of arrays containing duplicate index name
@@ -174,6 +221,15 @@ def rename_indexes(from, to, schema_name: connection.current_schema)
connection.execute(statements.join(';'))
end
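# Find the index on the table matching the block, raising when none matches.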
def find_index(table_name, &block)
indexes(table_name).find(&block) ||
raise(ArgumentError, "Could not find index for #{table_name}")
end
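# Serialize an index definition without its table, name and comment so that
# structurally identical indexes can be matched across partitions.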
def index_json(index_definition)
index_definition.as_json.except('table', 'name', 'comment')
end
end
end
end
......
@@ -11,7 +11,9 @@
let(:source_table_name) { '_test_partitioned_table' }
let(:target_table_name) { '_test_referenced_table' }
let(:second_target_table_name) { '_test_second_referenced_table' }
let(:column_name) { "#{target_table_name}_id" }
let(:second_column_name) { "#{second_target_table_name}_id" }
let(:foreign_key_name) { '_test_partitioned_fk' }
let(:partition_schema) { 'gitlab_partitions_dynamic' }
let(:partition1_name) { "#{partition_schema}.#{source_table_name}_202001" }
@@ -35,17 +37,27 @@
before do
allow(migration).to receive(:puts)
allow(migration).to receive(:with_lock_retries).and_yield
allow(migration).to receive(:transaction_open?).and_return(false)
connection.execute(<<~SQL)
DROP TABLE IF EXISTS #{target_table_name};
CREATE TABLE #{target_table_name} (
id serial NOT NULL,
PRIMARY KEY (id)
);
DROP TABLE IF EXISTS #{second_target_table_name};
CREATE TABLE #{second_target_table_name} (
id serial NOT NULL,
PRIMARY KEY (id)
);
DROP TABLE IF EXISTS #{source_table_name};
CREATE TABLE #{source_table_name} (
id serial NOT NULL,
#{column_name} int NOT NULL,
#{second_column_name} int NOT NULL,
created_at timestamptz NOT NULL,
PRIMARY KEY (id, created_at)
) PARTITION BY RANGE (created_at);
@@ -63,8 +75,6 @@
allow(migration).to receive(:foreign_key_exists?)
.with(source_table_name, target_table_name, anything)
.and_return(false)
allow(migration).to receive(:with_lock_retries).and_yield
end
context 'when the foreign key does not exist on the parent table' do
@@ -292,4 +302,140 @@ def expect_add_concurrent_fk(source_table_name, target_table_name, options)
end
end
end
shared_examples 'raising undefined object error' do
specify do
expect { execute }.to raise_error(
ActiveRecord::StatementInvalid,
/PG::UndefinedObject: ERROR: constraint "#{foreign_key_for_error}" for table .* does not exist/
)
end
end
describe '#rename_partitioned_foreign_key' do
subject(:execute) { migration.rename_partitioned_foreign_key(source_table_name, old_foreign_key, new_foreign_key) }
let(:old_foreign_key) { foreign_key_name }
let(:new_foreign_key) { :_test_partitioned_fk_new }
context 'when old foreign key exists' do
before do
migration.add_concurrent_partitioned_foreign_key(
source_table_name, target_table_name, column: column_name, name: old_foreign_key
)
end
context 'when new foreign key does not exist' do
it 'renames the old foreign key to the new name' do
expect { execute }
.to change { foreign_key_by_name(source_table_name, old_foreign_key) }.from(be_present).to(nil)
.and change { foreign_key_by_name(partition1_name, old_foreign_key) }.from(be_present).to(nil)
.and change { foreign_key_by_name(partition2_name, old_foreign_key) }.from(be_present).to(nil)
.and change { foreign_key_by_name(source_table_name, new_foreign_key) }.from(nil).to(be_present)
.and change { foreign_key_by_name(partition1_name, new_foreign_key) }.from(nil).to(be_present)
.and change { foreign_key_by_name(partition2_name, new_foreign_key) }.from(nil).to(be_present)
end
end
context 'when new foreign key exists' do
before do
migration.add_concurrent_partitioned_foreign_key(
source_table_name, target_table_name, column: column_name, name: new_foreign_key
)
end
it 'raises duplicate object error' do
expect { execute }.to raise_error(
ActiveRecord::StatementInvalid,
/PG::DuplicateObject: ERROR: constraint "#{new_foreign_key}" for relation .* already exists/
)
end
end
end
context 'when old foreign key does not exist' do
context 'when new foreign key does not exist' do
let(:foreign_key_for_error) { old_foreign_key }
it_behaves_like 'raising undefined object error'
end
context 'when new foreign key exists' do
before do
migration.add_concurrent_partitioned_foreign_key(
source_table_name, target_table_name, column: column_name, name: new_foreign_key
)
end
let(:foreign_key_for_error) { old_foreign_key }
it_behaves_like 'raising undefined object error'
end
end
end
describe '#swap_partitioned_foreign_keys' do
subject(:execute) { migration.swap_partitioned_foreign_keys(source_table_name, old_foreign_key, new_foreign_key) }
let(:old_foreign_key) { foreign_key_name }
let(:new_foreign_key) { :_test_partitioned_fk_new }
context 'when old foreign key exists' do
before do
migration.add_concurrent_partitioned_foreign_key(
source_table_name, target_table_name, column: column_name, name: old_foreign_key
)
end
context 'when new foreign key does not exist' do
let(:foreign_key_for_error) { new_foreign_key }
it_behaves_like 'raising undefined object error'
end
context 'when new foreign key exists' do
before do
migration.add_concurrent_partitioned_foreign_key(
source_table_name, target_table_name, column: second_column_name, name: new_foreign_key
)
end
it 'swaps foreign keys' do
expect { execute }
.to change { foreign_key_by_name(source_table_name, old_foreign_key).column }
.from(column_name).to(second_column_name)
.and change { foreign_key_by_name(partition1_name, old_foreign_key).column }
.from(column_name).to(second_column_name)
.and change { foreign_key_by_name(partition2_name, old_foreign_key).column }
.from(column_name).to(second_column_name)
.and change { foreign_key_by_name(source_table_name, new_foreign_key).column }
.from(second_column_name).to(column_name)
.and change { foreign_key_by_name(partition1_name, new_foreign_key).column }
.from(second_column_name).to(column_name)
.and change { foreign_key_by_name(partition2_name, new_foreign_key).column }
.from(second_column_name).to(column_name)
end
end
end
context 'when old foreign key does not exist' do
context 'when new foreign key does not exist' do
let(:foreign_key_for_error) { old_foreign_key }
it_behaves_like 'raising undefined object error'
end
context 'when new foreign key exists' do
before do
migration.add_concurrent_partitioned_foreign_key(
source_table_name, target_table_name, column: second_column_name, name: new_foreign_key
)
end
let(:foreign_key_for_error) { old_foreign_key }
it_behaves_like 'raising undefined object error'
end
end
end
end
@@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::IndexHelpers do
RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::IndexHelpers, feature_category: :database do
include Database::TableSchemaHelpers
let(:migration) do
@@ -11,18 +11,23 @@
let(:table_name) { '_test_partitioned_table' }
let(:column_name) { 'created_at' }
let(:second_column_name) { 'updated_at' }
let(:index_name) { '_test_partitioning_index_name' }
let(:second_index_name) { '_test_second_partitioning_index_name' }
let(:partition_schema) { 'gitlab_partitions_dynamic' }
let(:partition1_identifier) { "#{partition_schema}.#{table_name}_202001" }
let(:partition2_identifier) { "#{partition_schema}.#{table_name}_202002" }
let(:partition1_index) { "index_#{table_name}_202001_#{column_name}" }
let(:partition2_index) { "index_#{table_name}_202002_#{column_name}" }
let(:second_partition1_index) { "index_#{table_name}_202001_#{second_column_name}" }
let(:second_partition2_index) { "index_#{table_name}_202002_#{second_column_name}" }
before do
allow(migration).to receive(:puts)
allow(migration).to receive(:transaction_open?).and_return(false)
connection.execute(<<~SQL)
DROP TABLE IF EXISTS #{table_name};
CREATE TABLE #{table_name} (
id serial NOT NULL,
created_at timestamptz NOT NULL,
@@ -30,9 +35,11 @@
PRIMARY KEY (id, created_at)
) PARTITION BY RANGE (created_at);
DROP TABLE IF EXISTS #{partition1_identifier};
CREATE TABLE #{partition1_identifier} PARTITION OF #{table_name}
FOR VALUES FROM ('2020-01-01') TO ('2020-02-01');
DROP TABLE IF EXISTS #{partition2_identifier};
CREATE TABLE #{partition2_identifier} PARTITION OF #{table_name}
FOR VALUES FROM ('2020-02-01') TO ('2020-03-01');
SQL
@@ -398,4 +405,158 @@ def expect_add_concurrent_index_and_call_original(table, column, index)
end
end
end
shared_examples 'raising undefined object error' do
specify do
expect { execute }.to raise_error(
ArgumentError,
/Could not find index for _test_partitioned_table/
)
end
end
describe '#rename_partitioned_index' do
subject(:execute) { migration.rename_partitioned_index(table_name, old_index_name, new_index_name) }
let(:old_index_name) { index_name }
let(:new_index_name) { :_test_partitioning_index_name_new }
before do
allow(migration.connection).to receive(:transaction_open?).and_return(false)
end
context 'when old index exists' do
before do
create_old_partitioned_index
end
context 'when new index does not exist' do
it 'renames the old index to the new name' do
expect { execute }
.to change { index_by_name(table_name, old_index_name) }.from(be_present).to(nil)
.and change { index_by_name(partition1_identifier, old_index_name, partitioned_table: table_name) }
.from(be_present).to(nil)
.and change { index_by_name(partition2_identifier, old_index_name, partitioned_table: table_name) }
.from(be_present).to(nil)
.and change { index_by_name(table_name, new_index_name) }.from(nil).to(be_present)
.and change { index_by_name(partition1_identifier, new_index_name, partitioned_table: table_name) }
.from(nil).to(be_present)
.and change { index_by_name(partition2_identifier, new_index_name, partitioned_table: table_name) }
.from(nil).to(be_present)
end
end
context 'when new index exists' do
before do
create_new_partitioned_index
end
it 'raises duplicate table error' do
expect { execute }.to raise_error(
ActiveRecord::StatementInvalid,
/PG::DuplicateTable: ERROR: .*"#{new_index_name}".* exists/
)
end
end
end
context 'when old index does not exist' do
context 'when new index does not exist' do
it_behaves_like 'raising undefined object error'
end
context 'when new index exists' do
before do
connection.execute(<<~SQL)
CREATE INDEX #{second_partition1_index} ON #{partition1_identifier} (#{second_column_name});
CREATE INDEX #{second_partition2_index} ON #{partition2_identifier} (#{second_column_name});
CREATE INDEX #{new_index_name} ON #{table_name} (#{second_column_name});
SQL
end
it_behaves_like 'raising undefined object error'
end
end
end
describe '#swap_partitioned_indexes' do
subject(:execute) { migration.swap_partitioned_indexes(table_name, old_index_name, new_index_name) }
let(:old_index_name) { index_name }
let(:new_index_name) { :_test_partitioning_index_name_new }
before do
allow(migration.connection).to receive(:transaction_open?).and_return(false)
end
context 'when old index exists' do
before do
create_old_partitioned_index
end
context 'when new index does not exist' do
it_behaves_like 'raising undefined object error'
end
context 'when new index exists' do
before do
create_new_partitioned_index
end
it 'swaps indexes' do
expect { execute }
.to change { index_by_name(table_name, old_index_name).columns }
.from(match_array(column_name)).to(match_array(second_column_name))
.and change { index_by_name(partition1_identifier, old_index_name, partitioned_table: table_name).columns }
.from(match_array(column_name)).to(match_array(second_column_name))
.and change { index_by_name(partition2_identifier, old_index_name, partitioned_table: table_name).columns }
.from(match_array(column_name)).to(match_array(second_column_name))
.and change { index_by_name(table_name, new_index_name).columns }
.from(match_array(second_column_name)).to(match_array(column_name))
.and change { index_by_name(partition1_identifier, new_index_name, partitioned_table: table_name).columns }
.from(match_array(second_column_name)).to(match_array(column_name))
.and change { index_by_name(partition2_identifier, new_index_name, partitioned_table: table_name).columns }
.from(match_array(second_column_name)).to(match_array(column_name))
end
end
end
context 'when old index does not exist' do
context 'when new index does not exist' do
it_behaves_like 'raising undefined object error'
end
context 'when new index exists' do
before do
connection.execute(<<~SQL)
CREATE INDEX #{second_partition1_index} ON #{partition1_identifier} (#{second_column_name});
CREATE INDEX #{second_partition2_index} ON #{partition2_identifier} (#{second_column_name});
CREATE INDEX #{new_index_name} ON #{table_name} (#{second_column_name});
SQL
end
it_behaves_like 'raising undefined object error'
end
end
end
def create_old_partitioned_index
connection.execute(<<~SQL)
CREATE INDEX #{partition1_index} ON #{partition1_identifier} (#{column_name});
CREATE INDEX #{partition2_index} ON #{partition2_identifier} (#{column_name});
CREATE INDEX #{old_index_name} ON #{table_name} (#{column_name});
SQL
end
def create_new_partitioned_index
connection.execute(<<~SQL)
CREATE INDEX #{second_partition1_index} ON #{partition1_identifier} (#{second_column_name});
CREATE INDEX #{second_partition2_index} ON #{partition2_identifier} (#{second_column_name});
CREATE INDEX #{new_index_name} ON #{table_name} (#{second_column_name});
SQL
end
end
# frozen_string_literal: true
require 'spec_helper'
require_migration!
RSpec.describe SwapColumnsForUpstreamPipelineIdBetweenCiBuildsAndCiPipelines, feature_category: :continuous_integration do
it_behaves_like(
'swap conversion columns',
table_name: :p_ci_builds,
from: :upstream_pipeline_id,
to: :upstream_pipeline_id_convert_to_bigint,
before_type: 'integer',
after_type: 'bigint'
)
it_behaves_like(
'swap conversion columns',
table_name: :p_ci_builds,
from: :commit_id,
to: :commit_id_convert_to_bigint,
before_type: 'integer',
after_type: 'bigint'
)
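# Simulates deployments where the old index exists under the
# *_convert_to_big_idx name rather than the canonical one, exercising the
# rename fallback in the migration's #old_index_name_from.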
context 'when index is different' do
let(:migration_connection) do
klass = Class.new(Gitlab::Database::Migration[2.2]) { milestone '16.11' }
klass.new.tap do |migration|
migration.extend Gitlab::Database::PartitioningMigrationHelpers
end
end
before do
migration_connection.execute(<<~SQL)
DROP INDEX IF EXISTS p_ci_builds_commit_id_artifacts_expire_at_id_convert_to_big_idx;
DROP INDEX IF EXISTS p_ci_builds_commit_id_artifacts_expire_at_id_idx;
SQL
migration_connection.add_concurrent_partitioned_index(
:p_ci_builds,
[:commit_id, :artifacts_expire_at, :id],
name: :p_ci_builds_commit_id_artifacts_expire_at_id_convert_to_big_idx,
where: "(((type)::text = 'Ci::Build'::text) AND ((retried = false) OR (retried IS NULL)) AND ((name)::text = ANY (ARRAY[('sast'::character varying)::text, ('secret_detection'::character varying)::text, ('dependency_scanning'::character varying)::text, ('container_scanning'::character varying)::text, ('dast'::character varying)::text])))" # rubocop:disable Layout/LineLength -- Just too long
)
end
it_behaves_like(
'swap conversion columns',
table_name: :p_ci_builds,
from: :upstream_pipeline_id,
to: :upstream_pipeline_id_convert_to_bigint,
before_type: 'integer',
after_type: 'bigint'
)
it_behaves_like(
'swap conversion columns',
table_name: :p_ci_builds,
from: :commit_id,
to: :commit_id_convert_to_bigint,
before_type: 'integer',
after_type: 'bigint'
)
end
end
@@ -155,6 +155,27 @@ def foreign_key_exists_by_name(table_name, foreign_key_name, schema: nil)
SQL
end
def foreign_key_by_name(source, name)
connection.foreign_keys(source).find do |key|
key.name == name.to_s
end
end
def index_by_name(table, name, partitioned_table: nil)
if partitioned_table
partitioned_index = index_by_name(partitioned_table, name)
return unless partitioned_index
connection.indexes(table).find do |key|
key.columns == partitioned_index.columns
end
else
connection.indexes(table).find do |key|
key.name == name.to_s
end
end
end
def check_constraint_definition(table_name, constraint_name, schema: nil)
table_name = schema ? "#{schema}.#{table_name}" : table_name
......