Commits on Source (78)
......@@ -38,6 +38,7 @@ stages:
- test
- integration
- release
- child jobs
- document
.go-pg-version-matrix:
......
......@@ -26,7 +26,7 @@ middleware:storage-googlecdn:
variables:
TEST_SHORT_FLAG: ""
- when: always
script: $GO_TEST -timeout=$TEST_TIMEOUT -v -coverprofile=coverage.out -tags=$BUILDTAGS $PACKAGE -args -check.v $TEST_SHORT_FLAG
script: $GO_TEST -timeout=$TEST_TIMEOUT -v -coverprofile=coverage.out -tags=$BUILDTAGS $PACKAGE $TEST_SHORT_FLAG
filesystem:
<<: *storage-driver-test
......@@ -41,7 +41,7 @@ inmemory:
PACKAGE: 'github.com/docker/distribution/registry/storage/driver/inmemory'
# Always run short tests for in-memory driver or we might run out of memory
# and cause a flaky test https://gitlab.com/gitlab-org/container-registry/-/issues/1177
script: $GO_TEST -timeout=$TEST_TIMEOUT -v -coverprofile=coverage.out -tags=$BUILDTAGS $PACKAGE -args -check.v -test.short
script: $GO_TEST -timeout=$TEST_TIMEOUT -v -coverprofile=coverage.out -tags=$BUILDTAGS $PACKAGE -test.short
s3-aws:
<<: *storage-driver-test
......
stages:
- main
dry-run:
image: node:lts
stage: main
needs: []
variables:
GIT_COMMITTER_EMAIL: $GITLAB_TOKEN_EMAIL
script:
- make release-tools
- npx semantic-release --dry-run
#NOTE(prozlach): Specifying rules is necessary to make child-pipelines work
rules:
- if: $CI_PIPELINE_SOURCE == "parent_pipeline"
cut:
image: node:lts
stage: main
needs: [ "dry-run" ]
variables:
GIT_COMMITTER_EMAIL: $GITLAB_TOKEN_EMAIL
script:
- make release-tools
- npx semantic-release
#NOTE(prozlach): Specifying rules is necessary to make child-pipelines work
rules:
- if: $CI_PIPELINE_SOURCE == "parent_pipeline"
when: manual
dry-run:
image: node:lts
stage: release
needs: []
variables:
GIT_COMMITTER_EMAIL: $GITLAB_TOKEN_EMAIL
trigger release:
stage: child jobs
trigger:
include:
- local: .gitlab/ci/release-trigger.yml
rules:
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
script:
- make release-tools
- npx semantic-release --dry-run
cut:
image: node:lts
stage: release
needs: [ "dry-run" ]
variables:
GIT_COMMITTER_EMAIL: $GITLAB_TOKEN_EMAIL
rules:
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
when: manual
script:
- make release-tools
- npx semantic-release
publish:
stage: release
......
......@@ -28,7 +28,8 @@ Related to <!-- add the issue URL here -->
- [ ] I added unit tests
- Documentation:
- [ ] [Documentation is not required](https://about.gitlab.com/handbook/engineering/ux/technical-writing/workflow/#when-documentation-is-required)
- [ ] I added [documentation](https://docs.gitlab.com/ee/development/documentation/workflow.html)
- [ ] I added [documentation](https://docs.gitlab.com/ee/development/documentation/workflow.html)
- [ ] I created or linked to an existing issue for every added or updated `TODO`, `BUG`, `FIXME` or `OPTIMIZE` prefixed comment
- ~database changes including schema migrations:
- [ ] Change does not introduce database changes
- [ ] MR includes DB changes
......
......@@ -97,16 +97,6 @@ linters-settings:
settings: # settings passed to gocritic
captLocal: # must be valid enabled check name
paramsOnly: true
godox:
# report any comments starting with keywords, this is useful for TODO or FIXME comments that
# might be left in the code accidentally and should be resolved before merging
keywords: # default keywords are TODO, BUG, and FIXME, these can be overwritten by this setting
- TODO
- BUG
- FIXME
- NOTE
- OPTIMIZE # marks code that should be optimized before merging
- HACK # marks hack-arounds that should be removed before merging
dogsled:
# checks assignments with too many blank identifiers; default is 2
max-blank-identifiers: 2
......@@ -150,7 +140,6 @@ linters:
- gocognit
- goconst
- gocritic
- godox
- gofumpt
- goimports
- gosec
......
# Versions of registry dependencies managed by asdf in gdk build.
golang 1.22.8 1.23.2
## [4.11.0](https://gitlab.com/gitlab-org/container-registry/compare/v4.10.0-gitlab...v4.11.0-gitlab) (2024-10-21)
### ✨ Features ✨
* add filesystem lock file ([dbdd7ba](https://gitlab.com/gitlab-org/container-registry/commit/dbdd7bafebfd8eb2e69a006939ab034b46c5a997))
* add manifest ID to FK violation error message ([12f7b1a](https://gitlab.com/gitlab-org/container-registry/commit/12f7b1ad00f3e99defb9721e33d0f931216efd79))
* gracefully handle DLB replica resolve/connection failures ([14aeb62](https://gitlab.com/gitlab-org/container-registry/commit/14aeb62eb4c8f826682a5df56a88d25c4ac35bd6))
### 🐛 Bug Fixes 🐛
* ensure consistency of DLB primary LSN records ([42da9b1](https://gitlab.com/gitlab-org/container-registry/commit/42da9b13765b7860827216b66fedfb7785c5c14e))
* fix path traversal for inmemory storage ([f8951ec](https://gitlab.com/gitlab-org/container-registry/commit/f8951ec144bd2cab0295a9ba941830767d130814))
### ⚙️ Build ⚙️
* add gdk build dependencies for asdf ([8c93d76](https://gitlab.com/gitlab-org/container-registry/commit/8c93d76a95f90d95390ea80256d73e3a22552bd7))
* **deps:** update module cloud.google.com/go/storage to v1.44.0 ([637bdbb](https://gitlab.com/gitlab-org/container-registry/commit/637bdbbdcda058d5e6e5572f8262a4b91d9bf2d3))
* **deps:** update module github.com/getsentry/sentry-go to v0.29.1 ([df968f0](https://gitlab.com/gitlab-org/container-registry/commit/df968f058748d750a0c1605a228b497a3ea8a30e))
* **deps:** update module github.com/prometheus/client_golang to v1.20.5 ([6d4bd3f](https://gitlab.com/gitlab-org/container-registry/commit/6d4bd3f6311002cf5aa914bda982b2d5295a8a3e))
* **deps:** update module github.com/redis/go-redis/v9 to v9.6.2 ([d40ec8f](https://gitlab.com/gitlab-org/container-registry/commit/d40ec8f56a3dd1eca5b53a52f9bf9796c088289e))
* **deps:** update module github.com/redis/go-redis/v9 to v9.7.0 ([500f98b](https://gitlab.com/gitlab-org/container-registry/commit/500f98bcad306718df1018ea9c80915bf3b51c91))
* **deps:** update module github.com/schollz/progressbar/v3 to v3.16.1 ([0405c6e](https://gitlab.com/gitlab-org/container-registry/commit/0405c6e494a8535eaefe24e586cdd84b55b5e8b8))
* **deps:** update module github.com/shopify/toxiproxy/v2 to v2.11.0 ([0b1621d](https://gitlab.com/gitlab-org/container-registry/commit/0b1621dea4ed56a2b4bdcab36ae9190ac503081b))
* **deps:** update module github.com/xanzy/go-gitlab to v0.110.0 ([156e38b](https://gitlab.com/gitlab-org/container-registry/commit/156e38bc153d20ea1bfb93aaa17740ea19f121c2))
* **deps:** update module github.com/xanzy/go-gitlab to v0.112.0 ([9448fc0](https://gitlab.com/gitlab-org/container-registry/commit/9448fc07ff8c9d04966940a39c712001fe5ceb1c))
* **deps:** update module gitlab.com/gitlab-org/labkit to v1.21.2 ([ec08f8d](https://gitlab.com/gitlab-org/container-registry/commit/ec08f8d9ebfac50e50e7aa1cc95c91f4630f594c))
* **deps:** update module golang.org/x/crypto to v0.28.0 ([37f68d6](https://gitlab.com/gitlab-org/container-registry/commit/37f68d612cf41ad4e73e2a67cd7677128b2eda09))
* **deps:** update module golang.org/x/time to v0.7.0 ([cdc3201](https://gitlab.com/gitlab-org/container-registry/commit/cdc3201a9c4526cc162c6ac4133142c6a028a327))
* **deps:** update module google.golang.org/api to v0.200.0 ([466e2ea](https://gitlab.com/gitlab-org/container-registry/commit/466e2ead363ada430233192ac9a78a6402995a10))
* **deps:** update module google.golang.org/api to v0.201.0 ([e83cb2a](https://gitlab.com/gitlab-org/container-registry/commit/e83cb2a0444ced5909cc5e466cd7c9913fd66e28))
## [4.10.0](https://gitlab.com/gitlab-org/container-registry/compare/v4.9.0-gitlab...v4.10.0-gitlab) (2024-10-03)
......
......@@ -477,14 +477,6 @@ type DatabaseLoadBalancing struct {
Port int `yaml:"port"`
// Record is the SRV DNS record to look up. This option is required for service discovery to work.
Record string `yaml:"record"`
// RecordCheckInterval is the interval to check the DNS record.
RecordCheckInterval time.Duration `yaml:"recordcheckinterval"`
// DisconnectTimeout is the time after which an old connection is closed, after the list of hosts was updated.
DisconnectTimeout time.Duration `yaml:"disconnecttimeout"`
// MaxReplicaLagTime is the maximum time a replica can be behind the primary before being quarantined.
MaxReplicaLagTime time.Duration `yaml:"maxreplicalagtime"`
// MaxReplicaLagBytes is the maximum number of bytes a replica can be behind the primary before being quarantined.
MaxReplicaLagBytes int `yaml:"maxreplicalagbytes"`
// ReplicaCheckInterval is the minimum amount of time between checking the status of a replica.
ReplicaCheckInterval time.Duration `yaml:"replicacheckinterval"`
}
......@@ -1018,7 +1010,7 @@ type Endpoint struct {
Backoff time.Duration `yaml:"backoff"` // backoff duration
IgnoredMediaTypes []string `yaml:"ignoredmediatypes"` // target media types to ignore
Ignore Ignore `yaml:"ignore"` // ignore event types
QueuePurgeTimeout time.Duration `yaml:"queuepurgetimeout"` // the amount of time the registry tries to send unsent notifications in the buffer after receiving SIGINT
QueuePurgeTimeout time.Duration `yaml:"queuepurgetimeout"` // the amount of time the registry tries to send unsent notifications in the buffer after receiving SIGINT
}
// Events configures notification events.
......@@ -1185,18 +1177,6 @@ func ApplyDefaults(config *Configuration) {
if config.Database.LoadBalancing.Port == 0 {
config.Database.LoadBalancing.Port = defaultDLBPort
}
if config.Database.LoadBalancing.RecordCheckInterval == 0 {
config.Database.LoadBalancing.RecordCheckInterval = defaultDLBRecordCheckInterval
}
if config.Database.LoadBalancing.DisconnectTimeout == 0 {
config.Database.LoadBalancing.DisconnectTimeout = defaultDLBDisconnectTimeout
}
if config.Database.LoadBalancing.MaxReplicaLagBytes == 0 {
config.Database.LoadBalancing.MaxReplicaLagBytes = defaultDLBMaxReplicaLagBytes
}
if config.Database.LoadBalancing.MaxReplicaLagTime == 0 {
config.Database.LoadBalancing.MaxReplicaLagTime = defaultDLBMaxReplicaLagTime
}
if config.Database.LoadBalancing.ReplicaCheckInterval == 0 {
config.Database.LoadBalancing.ReplicaCheckInterval = defaultDLBReplicaCheckInterval
}
......
......@@ -12,14 +12,11 @@ import (
"time"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
. "gopkg.in/check.v1"
"gopkg.in/yaml.v2"
)
// Hook up gocheck into the "go test" runner
func Test(t *testing.T) { TestingT(t) }
// configStruct is a canonical example configuration, which should map to configYamlV0_1
var configStruct = Configuration{
Version: "0.1",
......@@ -76,10 +73,6 @@ var configStruct = Configuration{
Nameserver: "localhost",
Port: 8600,
Record: "db-replica-registry.service.consul",
RecordCheckInterval: 1 * time.Minute,
DisconnectTimeout: 2 * time.Minute,
MaxReplicaLagTime: 1 * time.Minute,
MaxReplicaLagBytes: 8388608,
ReplicaCheckInterval: 1 * time.Minute,
},
},
......@@ -261,39 +254,44 @@ database:
jobinterval: 1m
`
func TestConfigSuite(t *testing.T) {
suite.Run(t, new(ConfigSuite))
}
type ConfigSuite struct {
suite.Suite
expectedConfig *Configuration
}
var _ = Suite(new(ConfigSuite))
func (suite *ConfigSuite) SetUpTest(c *C) {
func (suite *ConfigSuite) SetupTest() {
os.Clearenv()
suite.expectedConfig = copyConfig(configStruct)
}
// TestMarshalRoundtrip validates that configStruct can be marshaled and
// unmarshaled without changing any parameters
func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) {
func (suite *ConfigSuite) TestMarshalRoundtrip() {
configBytes, err := yaml.Marshal(suite.expectedConfig)
c.Assert(err, IsNil)
require.NoError(suite.T(), err)
config, err := Parse(bytes.NewReader(configBytes))
c.Log(string(configBytes))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
suite.T().Log(string(configBytes))
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseSimple validates that configYamlV0_1 can be parsed into a struct
// matching configStruct
func (suite *ConfigSuite) TestParseSimple(c *C) {
func (suite *ConfigSuite) TestParseSimple() {
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseInmemory validates that configuration yaml with storage provided as
// a string can be parsed into a Configuration struct with no storage parameters
func (suite *ConfigSuite) TestParseInmemory(c *C) {
func (suite *ConfigSuite) TestParseInmemory() {
suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
suite.expectedConfig.Database = Database{
Enabled: true,
......@@ -307,17 +305,17 @@ func (suite *ConfigSuite) TestParseInmemory(c *C) {
suite.expectedConfig.Log.Fields = nil
config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseIncomplete validates that an incomplete yaml configuration cannot
// be parsed without providing environment variables to fill in the missing
// components.
func (suite *ConfigSuite) TestParseIncomplete(c *C) {
func (suite *ConfigSuite) TestParseIncomplete() {
incompleteConfigYaml := "version: 0.1"
_, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
c.Assert(err, NotNil)
require.Error(suite.T(), err)
suite.expectedConfig.Log.Fields = nil
suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}}
......@@ -335,28 +333,28 @@ func (suite *ConfigSuite) TestParseIncomplete(c *C) {
os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly")
config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseWithSameEnvStorage validates that providing environment variables
// that match the given storage type will only include environment-defined
// parameters and remove yaml-defined parameters
func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) {
func (suite *ConfigSuite) TestParseWithSameEnvStorage() {
suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}}
os.Setenv("REGISTRY_STORAGE", "s3")
os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change
// and add to the given storage parameters will change and add parameters to the parsed
// Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) {
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams() {
suite.expectedConfig.Storage.setParameter("region", "us-west-1")
suite.expectedConfig.Storage.setParameter("secure", true)
suite.expectedConfig.Storage.setParameter("newparam", "some Value")
......@@ -366,26 +364,26 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) {
os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseWithDifferentEnvStorageType validates that providing an environment variable that
// changes the storage type will be reflected in the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) {
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType() {
suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
os.Setenv("REGISTRY_STORAGE", "inmemory")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable
// that changes the storage type will be reflected in the parsed Configuration struct and that
// environment storage parameters will also be included
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) {
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams() {
suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}}
suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot")
......@@ -393,56 +391,56 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) {
os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log
// level to the same as the one provided in the yaml will not change the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) {
func (suite *ConfigSuite) TestParseWithSameEnvLoglevel() {
os.Setenv("REGISTRY_LOGLEVEL", "info")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the
// log level will override the value provided in the yaml document
func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) {
func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel() {
suite.expectedConfig.Log.Level = "error"
os.Setenv("REGISTRY_LOG_LEVEL", "error")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseInvalidLoglevel validates that the parser will fail to parse a
// configuration if the loglevel is malformed
func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) {
func (suite *ConfigSuite) TestParseInvalidLoglevel() {
invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory"
_, err := Parse(bytes.NewReader([]byte(invalidConfigYaml)))
c.Assert(err, NotNil)
require.Error(suite.T(), err)
os.Setenv("REGISTRY_LOGLEVEL", "derp")
_, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
require.Error(suite.T(), err)
}
// TestParseWithoutStorageValidation validates that the parser will not fail to parse a configuration if a storage
// driver was not set but WithoutStorageValidation was passed as an option.
func (suite *ConfigSuite) TestParseWithoutStorageValidation(c *C) {
func (suite *ConfigSuite) TestParseWithoutStorageValidation() {
configYaml := "version: 0.1"
_, err := Parse(bytes.NewReader([]byte(configYaml)))
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "no storage configuration provided")
require.Error(suite.T(), err)
require.ErrorContains(suite.T(), err, "no storage configuration provided")
_, err = Parse(bytes.NewReader([]byte(configYaml)), WithoutStorageValidation())
c.Assert(err, IsNil)
require.NoError(suite.T(), err)
}
type parameterTest struct {
......@@ -665,7 +663,7 @@ storage: inmemory
}
// TestParseWithDifferentEnvDatabase validates that environment variables properly override database parameters
func (suite *ConfigSuite) TestParseWithDifferentEnvDatabase(c *C) {
func (suite *ConfigSuite) TestParseWithDifferentEnvDatabase() {
expected := Database{
Enabled: true,
Host: "127.0.0.1",
......@@ -685,10 +683,6 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvDatabase(c *C) {
Nameserver: "localhost",
Port: 8600,
Record: "db-replica-registry.service.consul",
RecordCheckInterval: 1 * time.Minute,
DisconnectTimeout: 2 * time.Minute,
MaxReplicaLagTime: 1 * time.Minute,
MaxReplicaLagBytes: 8388608,
ReplicaCheckInterval: 1 * time.Minute,
},
}
......@@ -703,23 +697,23 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvDatabase(c *C) {
os.Setenv("REGISTRY_DATABASE_SSLMODE", expected.SSLMode)
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration
// version than the CurrentVersion
func (suite *ConfigSuite) TestParseInvalidVersion(c *C) {
func (suite *ConfigSuite) TestParseInvalidVersion() {
suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1)
configBytes, err := yaml.Marshal(suite.expectedConfig)
c.Assert(err, IsNil)
require.NoError(suite.T(), err)
_, err = Parse(bytes.NewReader(configBytes))
c.Assert(err, NotNil)
require.Error(suite.T(), err)
}
// TestParseExtraneousVars validates that environment variables referring to
// nonexistent variables don't cause side effects.
func (suite *ConfigSuite) TestParseExtraneousVars(c *C) {
func (suite *ConfigSuite) TestParseExtraneousVars() {
suite.expectedConfig.Reporting.Sentry.Environment = "test"
// A valid environment variable
......@@ -730,13 +724,13 @@ func (suite *ConfigSuite) TestParseExtraneousVars(c *C) {
os.Setenv("REGISTRY_REPORTING_ASDF", "ghjk")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseEnvVarImplicitMaps validates that environment variables can set
// values in maps that don't already exist.
func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *C) {
func (suite *ConfigSuite) TestParseEnvVarImplicitMaps() {
readonly := make(map[string]interface{})
readonly["enabled"] = true
......@@ -748,41 +742,41 @@ func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *C) {
os.Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true")
config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, suite.expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), suite.expectedConfig, config)
}
// TestParseEnvWrongTypeMap validates that incorrectly attempting to unmarshal a
// string over existing map fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeMap(c *C) {
func (suite *ConfigSuite) TestParseEnvWrongTypeMap() {
os.Setenv("REGISTRY_STORAGE_S3", "somestring")
_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
require.Error(suite.T(), err)
}
// TestParseEnvWrongTypeStruct validates that incorrectly attempting to
// unmarshal a string into a struct fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeStruct(c *C) {
func (suite *ConfigSuite) TestParseEnvWrongTypeStruct() {
os.Setenv("REGISTRY_STORAGE_LOG", "somestring")
_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
require.Error(suite.T(), err)
}
// TestParseEnvWrongTypeSlice validates that incorrectly attempting to
// unmarshal a string into a slice fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeSlice(c *C) {
func (suite *ConfigSuite) TestParseEnvWrongTypeSlice() {
os.Setenv("REGISTRY_HTTP_TLS_CLIENTCAS", "somestring")
_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
require.Error(suite.T(), err)
}
// TestParseEnvMany tests several environment variable overrides.
// The result is not checked - the goal of this test is to detect panics
// from misuse of reflection.
func (suite *ConfigSuite) TestParseEnvMany(c *C) {
func (suite *ConfigSuite) TestParseEnvMany() {
os.Setenv("REGISTRY_VERSION", "0.1")
os.Setenv("REGISTRY_LOG_LEVEL", "debug")
os.Setenv("REGISTRY_LOG_FORMATTER", "json")
......@@ -794,7 +788,7 @@ func (suite *ConfigSuite) TestParseEnvMany(c *C) {
os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")
_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, IsNil)
require.NoError(suite.T(), err)
}
func boolParameterTests(defaultValue bool) []parameterTest {
......@@ -1597,97 +1591,6 @@ database:
testParameter(t, yml, "REGISTRY_DATABASE_LOADBALANCING_RECORD", tt, validator)
}
func TestParseDatabaseLoadBalancing_RecordCheckInterval(t *testing.T) {
yml := `
version: 0.1
storage: inmemory
database:
loadbalancing:
enabled: true
recordcheckinterval: %s
`
tt := []parameterTest{
{name: "default", want: defaultDLBRecordCheckInterval},
{name: "custom", value: "2m", want: 2 * time.Minute},
}
validator := func(t *testing.T, want interface{}, got *Configuration) {
require.Equal(t, want, got.Database.LoadBalancing.RecordCheckInterval)
}
testParameter(t, yml, "REGISTRY_DATABASE_LOADBALANCING_RECORDCHECKINTERVAL", tt, validator)
}
func TestParseDatabaseLoadBalancing_DisconnectTimeout(t *testing.T) {
yml := `
version: 0.1
storage: inmemory
database:
loadbalancing:
enabled: true
disconnecttimeout: %s
`
tt := []parameterTest{
{name: "default", want: defaultDLBDisconnectTimeout},
{name: "custom", value: "3m", want: 3 * time.Minute},
}
validator := func(t *testing.T, want interface{}, got *Configuration) {
require.Equal(t, want, got.Database.LoadBalancing.DisconnectTimeout)
}
testParameter(t, yml, "REGISTRY_DATABASE_LOADBALANCING_DISCONNECTTIMEOUT", tt, validator)
}
func TestParseDatabaseLoadBalancing_MaxReplicaLagBytes(t *testing.T) {
yml := `
version: 0.1
storage: inmemory
database:
loadbalancing:
enabled: true
maxreplicalagbytes: %s
`
tt := []parameterTest{
{
name: "default",
want: defaultDLBMaxReplicaLagBytes,
},
{
name: "custom",
value: "1234",
want: 1234,
},
}
validator := func(t *testing.T, want interface{}, got *Configuration) {
require.Equal(t, want, got.Database.LoadBalancing.MaxReplicaLagBytes)
}
testParameter(t, yml, "REGISTRY_DATABASE_LOADBALANCING_MAXREPLICALAGBYTES", tt, validator)
}
func TestParseDatabaseLoadBalancing_MaxReplicaLagTime(t *testing.T) {
yml := `
version: 0.1
storage: inmemory
database:
loadbalancing:
enabled: true
maxreplicalagtime: %s
`
tt := []parameterTest{
{name: "default", want: defaultDLBMaxReplicaLagTime},
{name: "custom", value: "2m", want: 2 * time.Minute},
}
validator := func(t *testing.T, want interface{}, got *Configuration) {
require.Equal(t, want, got.Database.LoadBalancing.MaxReplicaLagTime)
}
testParameter(t, yml, "REGISTRY_DATABASE_LOADBALANCING_MAXREPLICALAGTIME", tt, validator)
}
func TestParseDatabaseLoadBalancing_ReplicaCheckInterval(t *testing.T) {
yml := `
version: 0.1
......@@ -1780,45 +1683,41 @@ reporting:
testParameter(t, yml, "REGISTRY_REPORTING_SENTRY_ENVIRONMENT", tt, validator)
}
func checkStructs(c *C, t reflect.Type, structsChecked map[string]struct{}) {
for t.Kind() == reflect.Ptr || t.Kind() == reflect.Map || t.Kind() == reflect.Slice {
t = t.Elem()
func checkStructs(t require.TestingT, rt reflect.Type, structsChecked map[string]struct{}) {
for rt.Kind() == reflect.Ptr || rt.Kind() == reflect.Map || rt.Kind() == reflect.Slice {
rt = rt.Elem()
}
if t.Kind() != reflect.Struct {
if rt.Kind() != reflect.Struct {
return
}
if _, present := structsChecked[t.String()]; present {
if _, present := structsChecked[rt.String()]; present {
// Already checked this type
return
}
structsChecked[t.String()] = struct{}{}
structsChecked[rt.String()] = struct{}{}
byUpperCase := make(map[string]int)
for i := 0; i < t.NumField(); i++ {
sf := t.Field(i)
for i := 0; i < rt.NumField(); i++ {
sf := rt.Field(i)
// Check that the yaml tag does not contain an _.
yamlTag := sf.Tag.Get("yaml")
if strings.Contains(yamlTag, "_") {
c.Fatalf("yaml field name includes _ character: %s", yamlTag)
}
require.NotContainsf(t, yamlTag, "_", "yaml field name includes _ character: %s", yamlTag)
upper := strings.ToUpper(sf.Name)
if _, present := byUpperCase[upper]; present {
c.Fatalf("field name collision in configuration object: %s", sf.Name)
}
require.NotContainsf(t, byUpperCase, upper, "field name collision in configuration object: %s", sf.Name)
byUpperCase[upper] = i
checkStructs(c, sf.Type, structsChecked)
checkStructs(t, sf.Type, structsChecked)
}
}
// TestValidateConfigStruct makes sure that the config struct has no members
// with yaml tags that would be ambiguous to the environment variable parser.
func (suite *ConfigSuite) TestValidateConfigStruct(c *C) {
func (suite *ConfigSuite) TestValidateConfigStruct() {
structsChecked := make(map[string]struct{})
checkStructs(c, reflect.TypeOf(Configuration{}), structsChecked)
checkStructs(suite.T(), reflect.TypeOf(Configuration{}), structsChecked)
}
func copyConfig(config Configuration) *Configuration {
......
......@@ -3,8 +3,10 @@ package configuration
import (
"os"
"reflect"
"testing"
. "gopkg.in/check.v1"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
type localConfiguration struct {
......@@ -23,11 +25,15 @@ var expectedConfig = localConfiguration{
},
}
type ParserSuite struct{}
func TestParserSuite(t *testing.T) {
suite.Run(t, new(ParserSuite))
}
var _ = Suite(new(ParserSuite))
type ParserSuite struct {
suite.Suite
}
func (suite *ParserSuite) TestParserOverwriteIninitializedPoiner(c *C) {
func (suite *ParserSuite) TestParserOverwriteIninitializedPoiner() {
config := localConfiguration{}
os.Setenv("REGISTRY_LOG_FORMATTER", "json")
......@@ -44,11 +50,11 @@ func (suite *ParserSuite) TestParserOverwriteIninitializedPoiner(c *C) {
})
err := p.Parse([]byte(`{version: "0.1", log: {formatter: "text"}}`), &config)
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), expectedConfig, config)
}
func (suite *ParserSuite) TestParseOverwriteUnininitializedPoiner(c *C) {
func (suite *ParserSuite) TestParseOverwriteUnininitializedPoiner() {
config := localConfiguration{}
os.Setenv("REGISTRY_LOG_FORMATTER", "json")
......@@ -65,6 +71,6 @@ func (suite *ParserSuite) TestParseOverwriteUnininitializedPoiner(c *C) {
})
err := p.Parse([]byte(`{version: "0.1"}`), &config)
c.Assert(err, IsNil)
c.Assert(config, DeepEquals, expectedConfig)
require.NoError(suite.T(), err)
require.Equal(suite.T(), expectedConfig, config)
}
......@@ -622,10 +622,6 @@ database:
nameserver: localhost
port: 8600
record: db-replica-registry.service.consul
recordcheckinterval: 1m
disconnecttimeout: 2m
maxreplicalagtime: 1m
maxreplicalagbytes: 8388608
replicacheckinterval: 1m
```
......@@ -697,10 +693,6 @@ loadbalancing:
nameserver: localhost
port: 8600
record: db-replica-registry.service.consul
recordcheckinterval: 1m
disconnecttimeout: 2m
maxreplicalagtime: 1m
maxreplicalagbytes: 8388608
replicacheckinterval: 1m
```
......@@ -710,10 +702,6 @@ loadbalancing:
| `nameserver` | No | The nameserver to use for looking up the DNS record. | `localhost` |
| `port` | No | The port of the nameserver. | `8600` |
| `record` | Yes | The `SRV` record to look up. This option is required for service discovery to work. | |
| `recordcheckinterval` | No | The minimum amount of time between checking the DNS record. | `1m` |
| `disconnecttimeout` | No | The amount of time after which an old connection is closed, after the list of hosts was updated. | `2m` |
| `maxreplicalagbytes` | No | The amount of data (in bytes) a replica is allowed to lag behind before being quarantined. | `8388608` (8MiB) |
| `maxreplicalagtime` | No | The maximum amount of time a replica is allowed to lag behind before being quarantined. | `1m` |
| `replicacheckinterval` | No | The minimum amount of time between checking the status of a replica. | `1m` |
## `auth`
......
......@@ -114,10 +114,6 @@ database:
nameserver: localhost
port: 8600
record: db-replica-registry.service.consul
recordcheckinterval: 1m
disconnecttimeout: 2m
maxreplicalagtime: 1m
maxreplicalagbytes: 8388608
replicacheckinterval: 1m
```
......@@ -127,10 +123,6 @@ database:
| `nameserver` | No | The nameserver to use for looking up the DNS record. | `localhost` |
| `port` | No | The port of the nameserver. | `8600` |
| `record` | Yes | The `SRV` record to look up. This option is required for service discovery to work. | |
| `recordcheckinterval` | No | The minimum amount of time between checking the DNS record. | `1m` |
| `disconnecttimeout` | No | The amount of time after which an old connection is closed, after the list of hosts was updated. | `2m` |
| `maxreplicalagbytes` | No | The amount of data (in bytes) a replica is allowed to lag behind before being quarantined. | `8388608` (8MiB) |
| `maxreplicalagtime` | No | The maximum amount of time a replica is allowed to lag behind before being quarantined. | `1m` |
| `replicacheckinterval` | No | The minimum amount of time between checking the status of a replica. | `1m` |
We'll refer to each of these configuration parameters and their purpose in the following sections.
......@@ -167,11 +159,11 @@ See [RFC 2782](https://datatracker.ietf.org/doc/html/rfc2782) for more details a
To ensure fault tolerance, the Container Registry will:
- Periodically (`recordcheckinterval`) refresh the list of resolved replica addresses asynchronously to maintain up-to-date information;
- Periodically (`replicacheckinterval`) refresh the list of resolved replica addresses asynchronously to maintain up-to-date information;
- Trigger an immediate refresh of the replica list in case of network/connectivity errors, as these might indicate events such as a cluster failover;
- Gracefully expire open connections (`disconnecttimeout`) if the list of hosts changes, ensuring that stale connections are closed;
- Gracefully expire open connections if the list of hosts changes, ensuring that stale connections are closed;
- Fall back to the primary server if all replicas are unavailable or unresponsive. This also relates to [Primary Sticking](#primary-sticking).
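For illustration, the following is a minimal, self-contained Go sketch of the periodic refresh loop described in the first bullet, loosely modeled on `DBLoadBalancer.StartReplicaChecking` and `ResolveReplicas` from this MR; the `Resolver` type, its `interval` field, and the logging are stand-ins, not the actual registry implementation.

```go
package main

import (
	"context"
	"log"
	"time"
)

// Resolver is a hypothetical stand-in for the load balancer's replica resolution.
// Its interval plays the role of the replicacheckinterval setting.
type Resolver struct {
	interval time.Duration
}

// ResolveReplicas would perform the SRV lookup and refresh replica connections.
func (r *Resolver) ResolveReplicas(_ context.Context) error {
	return nil
}

// StartReplicaChecking refreshes the replica list on every tick until the
// context is canceled, logging (rather than failing on) transient errors.
func (r *Resolver) StartReplicaChecking(ctx context.Context) error {
	ticker := time.NewTicker(r.interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			if err := r.ResolveReplicas(ctx); err != nil {
				log.Printf("failed to refresh replica list: %v", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	r := &Resolver{interval: time.Second}
	_ = r.StartReplicaChecking(ctx) // returns once the context expires
}
```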
......@@ -217,12 +209,14 @@ sequenceDiagram
To avoid stale reads, the Container Registry will:
- After a successful API write request for repository `R`, check the [Log Sequence Number (LSN)](https://www.postgresql.org/docs/14/wal-internals.html) for the Primary ([`pg_current_wal_insert_lsn`](https://www.postgresql.org/docs/14/functions-admin.html#FUNCTIONS-ADMIN-BACKUP)) and record it in Redis, associated with `R` with a TTL*****.
- After a successful API write request for repository `R`, check the [Log Sequence Number (LSN)](https://www.postgresql.org/docs/14/wal-internals.html) for the Primary ([`pg_current_wal_insert_lsn`](https://www.postgresql.org/docs/14/functions-admin.html#FUNCTIONS-ADMIN-BACKUP)) and conditionally update it in Redis, associated with `R` with a TTL.
The LSN represents the current write-ahead log (WAL) insert location, and can be used to determine if and how far apart primary and replicas are in terms of data replication.
This strategy leverages the fact that the target repository path is part of every write API request's path. Therefore, it's possible to univocally determine the target repository for each write request.
The comparison and conditional update of LSN records in Redis is done atomically using a Lua script to avoid race conditions. LSNs in PostgreSQL are represented as `X/Y` strings, where `X` is the major part (higher order bits) and `Y` is the minor part (lower order bits) in hexadecimal. For proper comparison, the script converts both the current and new LSNs into 64-bit numeric values before comparing them, and only updates the record if the new LSN is greater than the stored one or if no record exists yet, as sketched below.
- When serving an API read request for `R`, check the LSN of the candidate replica ([`pg_last_wal_replay_lsn`](https://www.postgresql.org/docs/14/functions-admin.html#FUNCTIONS-ADMIN-BACKUP)) and compare it ([`pg_wal_lsn_diff`](https://www.postgresql.org/docs/14/functions-admin.html#FUNCTIONS-ADMIN-BACKUP)) with the previously recorded primary LSN for `R`. This can be done in a single query against the replica.
If the record does not exist in Redis, the request should be served by the replica.
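As an illustration of the conversion described above, here is a small, self-contained Go sketch that parses PostgreSQL `X/Y` LSN strings into 64-bit values and compares them. In the registry the comparison and update happen atomically inside Redis via a Lua script; the helper names `parseLSN` and `newerLSN` are hypothetical.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseLSN converts an "X/Y" LSN string into a single uint64, with X as the
// higher-order 32 bits and Y as the lower-order 32 bits (both hexadecimal).
func parseLSN(lsn string) (uint64, error) {
	parts := strings.Split(lsn, "/")
	if len(parts) != 2 {
		return 0, fmt.Errorf("invalid LSN %q", lsn)
	}
	hi, err := strconv.ParseUint(parts[0], 16, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid LSN major part %q: %w", parts[0], err)
	}
	lo, err := strconv.ParseUint(parts[1], 16, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid LSN minor part %q: %w", parts[1], err)
	}
	return hi<<32 | lo, nil
}

// newerLSN reports whether candidate is strictly ahead of current in the WAL.
func newerLSN(current, candidate string) (bool, error) {
	cur, err := parseLSN(current)
	if err != nil {
		return false, err
	}
	cand, err := parseLSN(candidate)
	if err != nil {
		return false, err
	}
	return cand > cur, nil
}

func main() {
	ahead, err := newerLSN("16/B374D848", "16/B374D9F0")
	if err != nil {
		panic(err)
	}
	fmt.Println(ahead) // true: the second LSN is further ahead in the WAL
}
```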
......
......@@ -39,7 +39,7 @@ docker run --network=host -t --entrypoint=/bin/sh minio/mc \
Now you can run the S3 integration tests against the MinIO server we created above:
```shell
go test -timeout 20m -v github.com/docker/distribution/registry/storage/driver/s3-aws -args -check.v
go test -timeout 20m -v github.com/docker/distribution/registry/storage/driver/s3-aws
```
Finally, the MinIO server can be stopped once you are finished with the
......@@ -55,7 +55,7 @@ To run the benchmarks against any configured driver, run the following
command, substituting the appropriate driver:
```shell
go test -v -cpuprofile profile.out github.com/docker/distribution/registry/storage/driver/s3-aws -args -check.v -check.b
go test -v -cpuprofile profile.out github.com/docker/distribution/registry/storage/driver/s3-aws
```
Afterwards, the `profile.out` file we generated above can be used to analyze
......
......@@ -5,10 +5,10 @@ go 1.22
toolchain go1.22.7
require (
cloud.google.com/go/storage v1.43.0
cloud.google.com/go/storage v1.44.0
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/Shopify/toxiproxy/v2 v2.8.0
github.com/Shopify/toxiproxy/v2 v2.11.0
github.com/alicebob/miniredis/v2 v2.33.0
github.com/aws/aws-sdk-go v1.46.7
github.com/benbjohnson/clock v1.3.5
......@@ -17,7 +17,7 @@ require (
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
github.com/eko/gocache/lib/v4 v4.1.6
github.com/eko/gocache/store/redis/v4 v4.2.0
github.com/getsentry/sentry-go v0.29.0
github.com/getsentry/sentry-go v0.29.1
github.com/go-redis/redismock/v9 v9.0.3
github.com/gorilla/handlers v1.5.2
github.com/gorilla/mux v1.8.1
......@@ -27,35 +27,36 @@ require (
github.com/olekukonko/tablewriter v0.0.5
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0
github.com/prometheus/client_golang v1.20.4
github.com/redis/go-redis/v9 v9.6.1
github.com/prometheus/client_golang v1.20.5
github.com/redis/go-redis/v9 v9.7.0
github.com/rubenv/sql-migrate v1.7.0
github.com/schollz/progressbar/v3 v3.16.0
github.com/schollz/progressbar/v3 v3.16.1
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.9.0
github.com/trim21/go-redis-prometheus v0.0.0
github.com/vmihailenco/msgpack/v5 v5.4.1
github.com/xanzy/go-gitlab v0.109.0
gitlab.com/gitlab-org/labkit v1.21.0
github.com/xanzy/go-gitlab v0.112.0
gitlab.com/gitlab-org/labkit v1.21.2
go.uber.org/automaxprocs v1.6.0
go.uber.org/mock v0.4.0
golang.org/x/crypto v0.27.0
golang.org/x/crypto v0.28.0
golang.org/x/oauth2 v0.23.0
golang.org/x/sync v0.8.0
golang.org/x/time v0.6.0
google.golang.org/api v0.199.0
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
golang.org/x/time v0.7.0
google.golang.org/api v0.201.0
gopkg.in/yaml.v2 v2.4.0
)
require (
cloud.google.com/go v0.115.1 // indirect
cloud.google.com/go/auth v0.9.5 // indirect
cel.dev/expr v0.16.1 // indirect
cloud.google.com/go v0.116.0 // indirect
cloud.google.com/go/auth v0.9.8 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
cloud.google.com/go/compute/metadata v0.5.2 // indirect
cloud.google.com/go/iam v1.2.0 // indirect
cloud.google.com/go/iam v1.2.1 // indirect
cloud.google.com/go/monitoring v1.21.1 // indirect
cloud.google.com/go/profiler v0.1.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
......@@ -64,13 +65,19 @@ require (
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/client9/reopen v1.0.0 // indirect
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dnaeon/go-vcr v1.0.1 // indirect
github.com/envoyproxy/go-control-plane v0.13.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
......@@ -97,8 +104,6 @@ require (
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
......@@ -107,12 +112,12 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/oklog/ulid/v2 v2.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a // indirect
......@@ -123,24 +128,29 @@ require (
github.com/subosito/gotenv v1.6.0 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/yuin/gopher-lua v1.1.1 // indirect
gitlab.com/gitlab-org/go/reopen v1.0.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
go.opentelemetry.io/otel v1.29.0 // indirect
go.opentelemetry.io/otel/metric v1.29.0 // indirect
go.opentelemetry.io/otel/sdk v1.29.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect
go.opentelemetry.io/otel/trace v1.29.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/net v0.29.0 // indirect
golang.org/x/sys v0.25.0 // indirect
golang.org/x/term v0.24.0 // indirect
golang.org/x/text v0.18.0 // indirect
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.67.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/term v0.25.0 // indirect
golang.org/x/text v0.19.0 // indirect
google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect
google.golang.org/grpc v1.67.1 // indirect
google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
......@@ -45,6 +45,14 @@ var BBMProcess = Feature{
EnvVariable: "REGISTRY_FF_BBM",
}
// EnforceLockfiles is used to enable lock file checking for the `database-in-use` and `filesystem-in-use` checks.
// Enabling this feature will stop the registry from running in an unsupported mode. See
// https://gitlab.com/gitlab-org/container-registry/-/blob/master/docs/spec/gitlab/lockfiles.md
// for more information.
var EnforceLockfiles = Feature{
EnvVariable: "REGISTRY_FF_ENFORCE_LOCKFILES",
}
// testFeature is used for testing purposes only
var testFeature = Feature{
EnvVariable: "REGISTRY_FF_TEST",
......
......@@ -47,6 +47,10 @@ type Namespace interface {
// BlobStatter returns a BlobStatter to control
BlobStatter() BlobStatter
// Lockers returns file lockers used to protect a registry's metadata
// from being used in the incorrect mode (filesystem vs database).
Lockers() Lockers
}
// RepositoryEnumerator describes an operation to enumerate repositories
......@@ -108,3 +112,19 @@ type Repository interface {
// Tags returns a reference to this repositories tag service
Tags(ctx context.Context) TagService
}
// Lockers returns file lockers used to protect a registry's metadata
// from being used in the incorrect mode (filesystem vs database).
type Lockers interface {
// DBLock creates the database-in-use lockfile in the storage driver
DBLock(ctx context.Context) error
// DBUnlock removes the database-in-use lockfile in the storage driver
DBUnlock(ctx context.Context) error
// DBIsLocked returns whether the registry is using the database-in-use lockfile
DBIsLocked(ctx context.Context) (bool, error)
// FSLock creates the filesystem-in-use file in the storage driver
FSLock(ctx context.Context) error
// FSUnlock removes the filesystem-in-use file in the storage driver
FSUnlock(ctx context.Context) error
}
......@@ -717,10 +717,13 @@ func (lb *DBLoadBalancer) StartReplicaChecking(ctx context.Context) error {
}
}
// NewDBLoadBalancer initializes a DBLoadBalancer with primary and replica connections.
// NewDBLoadBalancer initializes a DBLoadBalancer with primary and replica connections. An error is returned if the
// connection to the primary server fails. Failures to connect to replica server(s) are handled gracefully, that is,
// logged, reported, and otherwise ignored. This prevents halting the application start, as the registry can function
// with the primary server alone. DBLoadBalancer.StartReplicaChecking can be used to periodically refresh the list of
// replicas, potentially self-healing transient connection failures that occurred during this initialization.
func NewDBLoadBalancer(ctx context.Context, primaryDSN *DSN, opts ...Option) (*DBLoadBalancer, error) {
config := applyOptions(opts)
var result *multierror.Error
lb := &DBLoadBalancer{
active: config.loadBalancing.active,
......@@ -738,25 +741,22 @@ func NewDBLoadBalancer(ctx context.Context, primaryDSN *DSN, opts ...Option) (*D
primary, err := lb.connector.Open(ctx, primaryDSN, opts...)
if err != nil {
result = multierror.Append(result, fmt.Errorf("failed to open primary database connection: %w", err))
return nil, fmt.Errorf("failed to open primary database connection: %w", err)
}
lb.primary = primary
// Conditionally register metrics for the primary database handle
if lb.metricsEnabled && primary != nil {
if lb.metricsEnabled {
lb.promRegisterer.MustRegister(lb.metricsCollector(primary, HostTypePrimary))
}
if lb.active {
if err := lb.ResolveReplicas(ctx); err != nil {
result = multierror.Append(result, err)
lb.logger(ctx).WithError(err).Error("failed to resolve database load balancing replicas")
errortracking.Capture(err, errortracking.WithContext(ctx), errortracking.WithStackTrace())
}
}
if result.ErrorOrNil() != nil {
return nil, result.ErrorOrNil()
}
return lb, nil
}
......
......@@ -214,13 +214,13 @@ func TestNewDBLoadBalancer_WithFixedHosts_ConnectionError(t *testing.T) {
name string
primaryDSN *datastore.DSN
replicaHosts []string
expectedErrs []string
expectedErr string
}{
{
name: "primary connection fails",
primaryDSN: &datastore.DSN{Host: "fail_primary"},
replicaHosts: []string{"replica1"},
expectedErrs: []string{"failed to open primary database connection: primary connection failed"},
expectedErr: "failed to open primary database connection: primary connection failed",
},
{
name: "one replica connection fails",
......@@ -229,7 +229,6 @@ func TestNewDBLoadBalancer_WithFixedHosts_ConnectionError(t *testing.T) {
"replica1",
"fail_replica2",
},
expectedErrs: []string{`failed to open replica "fail_replica2:5432" database connection: replica connection failed`},
},
{
name: "multiple replica connections fail",
......@@ -238,10 +237,6 @@ func TestNewDBLoadBalancer_WithFixedHosts_ConnectionError(t *testing.T) {
"fail_replica1",
"fail_replica2",
},
expectedErrs: []string{
`failed to open replica "fail_replica1:5432" database connection: replica connection failed`,
`failed to open replica "fail_replica2:5432" database connection: replica connection failed`,
},
},
{
name: "primary and replica connections fail",
......@@ -249,10 +244,7 @@ func TestNewDBLoadBalancer_WithFixedHosts_ConnectionError(t *testing.T) {
replicaHosts: []string{
"fail_replica2",
},
expectedErrs: []string{
`failed to open primary database connection: primary connection failed`,
`failed to open replica "fail_replica2:1234" database connection: replica connection failed`,
},
expectedErr: "failed to open primary database connection: primary connection failed",
},
}
......@@ -264,15 +256,12 @@ func TestNewDBLoadBalancer_WithFixedHosts_ConnectionError(t *testing.T) {
datastore.WithConnector(mockConnector),
datastore.WithFixedHosts(tt.replicaHosts),
)
require.Nil(t, lb)
var errs *multierror.Error
require.ErrorAs(t, err, &errs)
require.NotNil(t, errs)
require.Len(t, errs.Errors, len(tt.expectedErrs))
for _, expectedErr := range tt.expectedErrs {
require.Contains(t, errs.Error(), expectedErr)
if tt.expectedErr != "" {
require.Nil(t, lb)
require.ErrorContains(t, err, tt.expectedErr)
} else {
require.NotNil(t, lb)
require.NoError(t, err)
}
})
}
......@@ -370,7 +359,7 @@ func TestNewDBLoadBalancer_WithServiceDiscovery(t *testing.T) {
require.NoError(t, replicaMock2.ExpectationsWereMet())
}
func TestNewDBLoadBalancer_WithServiceDiscovery_SRVLookupError(t *testing.T) {
func TestDBLoadBalancer_ResolveReplicas_SRVLookupError(t *testing.T) {
ctrl := gomock.NewController(t)
mockResolver := mocks.NewMockDNSResolver(ctrl)
......@@ -379,7 +368,8 @@ func TestNewDBLoadBalancer_WithServiceDiscovery_SRVLookupError(t *testing.T) {
// Mock the expected DNS lookups with an error
mockResolver.EXPECT().
LookupSRV(gomock.Any()).
Return(nil, fmt.Errorf("DNS SRV lookup error"))
Return(nil, fmt.Errorf("DNS SRV lookup error")).
Times(2)
primaryMockDB, primaryMock, err := sqlmock.New()
require.NoError(t, err)
......@@ -398,20 +388,23 @@ func TestNewDBLoadBalancer_WithServiceDiscovery_SRVLookupError(t *testing.T) {
mockConnector.EXPECT().Open(gomock.Any(), primaryDSN, gomock.Any()).Return(&datastore.DB{DB: primaryMockDB}, nil).AnyTimes()
// Create the load balancer with the service discovery option
_, err = datastore.NewDBLoadBalancer(
lb, err := datastore.NewDBLoadBalancer(
context.Background(),
primaryDSN,
datastore.WithConnector(mockConnector),
datastore.WithServiceDiscovery(mockResolver),
)
require.Error(t, err)
require.NoError(t, err)
require.NotNil(t, lb)
err = lb.ResolveReplicas(context.Background())
require.ErrorContains(t, err, "error resolving DNS SRV record: DNS SRV lookup error")
// Verify mock expectations
require.NoError(t, primaryMock.ExpectationsWereMet())
}
func TestNewDBLoadBalancer_WithServiceDiscovery_HostLookupError(t *testing.T) {
func TestDBLoadBalancer_ResolveReplicas_HostLookupError(t *testing.T) {
ctrl := gomock.NewController(t)
mockResolver := mocks.NewMockDNSResolver(ctrl)
......@@ -422,12 +415,14 @@ func TestNewDBLoadBalancer_WithServiceDiscovery_HostLookupError(t *testing.T) {
LookupSRV(gomock.Any()).
Return([]*net.SRV{
{Target: "srv1.example.com", Port: 6432},
}, nil)
}, nil).
Times(2)
// Mock the expected host lookup with an error
mockResolver.EXPECT().
LookupHost(gomock.Any(), "srv1.example.com").
Return(nil, fmt.Errorf("DNS host lookup error"))
Return(nil, fmt.Errorf("DNS host lookup error")).
Times(2)
primaryMockDB, primaryMock, err := sqlmock.New()
require.NoError(t, err)
......@@ -446,13 +441,16 @@ func TestNewDBLoadBalancer_WithServiceDiscovery_HostLookupError(t *testing.T) {
mockConnector.EXPECT().Open(gomock.Any(), primaryDSN, gomock.Any()).Return(&datastore.DB{DB: primaryMockDB}, nil).AnyTimes()
// Create the load balancer with the service discovery option
_, err = datastore.NewDBLoadBalancer(
lb, err := datastore.NewDBLoadBalancer(
context.Background(),
primaryDSN,
datastore.WithConnector(mockConnector),
datastore.WithServiceDiscovery(mockResolver),
)
require.Error(t, err)
require.NoError(t, err)
require.NotNil(t, lb)
err = lb.ResolveReplicas(context.Background())
require.ErrorContains(t, err, `error resolving host "srv1.example.com" address: DNS host lookup error`)
// Verify mock expectations
......@@ -520,23 +518,15 @@ func TestNewDBLoadBalancer_WithServiceDiscovery_ConnectionError(t *testing.T) {
name string
primaryDSN *datastore.DSN
mockExpectFunc func()
expectedErrors []string
expectedErr string
}{
{
name: "primary connection fails",
primaryDSN: failPrimaryDSN,
mockExpectFunc: func() {
mockConnector.EXPECT().Open(gomock.Any(), failPrimaryDSN, gomock.Any()).Return(nil, fmt.Errorf("primary connection failed"))
replica1MockDB, _, err := sqlmock.New()
require.NoError(t, err)
defer replica1MockDB.Close()
replica2MockDB, _, err := sqlmock.New()
require.NoError(t, err)
defer replica2MockDB.Close()
mockConnector.EXPECT().Open(gomock.Any(), replica1DSN, gomock.Any()).Return(&datastore.DB{DB: replica1MockDB}, nil)
mockConnector.EXPECT().Open(gomock.Any(), replica2DSN, gomock.Any()).Return(&datastore.DB{DB: replica2MockDB}, nil)
},
expectedErrors: []string{"failed to open primary database connection: primary connection failed"},
expectedErr: "failed to open primary database connection: primary connection failed",
},
{
name: "one replica connection fails",
......@@ -552,7 +542,6 @@ func TestNewDBLoadBalancer_WithServiceDiscovery_ConnectionError(t *testing.T) {
defer replica2MockDB.Close()
mockConnector.EXPECT().Open(gomock.Any(), replica2DSN, gomock.Any()).Return(&datastore.DB{DB: replica2MockDB}, nil)
},
expectedErrors: []string{`failed to open replica "192.168.1.1:6432" database connection: failed to open replica 1`},
},
{
name: "multiple replica connections fail",
......@@ -565,24 +554,14 @@ func TestNewDBLoadBalancer_WithServiceDiscovery_ConnectionError(t *testing.T) {
mockConnector.EXPECT().Open(gomock.Any(), replica1DSN, gomock.Any()).Return(nil, fmt.Errorf("failed to open replica 1"))
mockConnector.EXPECT().Open(gomock.Any(), replica2DSN, gomock.Any()).Return(nil, fmt.Errorf("failed to open replica 2"))
},
expectedErrors: []string{
`failed to open replica "192.168.1.1:6432" database connection: failed to open replica 1`,
`failed to open replica "192.168.1.2:6433" database connection: failed to open replica 2`,
},
},
{
name: "primary and replica connections fail",
primaryDSN: failPrimaryDSN,
mockExpectFunc: func() {
mockConnector.EXPECT().Open(gomock.Any(), failPrimaryDSN, gomock.Any()).Return(nil, fmt.Errorf("primary connection failed"))
mockConnector.EXPECT().Open(gomock.Any(), replica1DSN, gomock.Any()).Return(nil, fmt.Errorf("failed to open replica 1"))
mockConnector.EXPECT().Open(gomock.Any(), replica2DSN, gomock.Any()).Return(nil, fmt.Errorf("failed to open replica 2"))
},
expectedErrors: []string{
`failed to open primary database connection: primary connection failed`,
`failed to open replica "192.168.1.1:6432" database connection: failed to open replica 1`,
`failed to open replica "192.168.1.2:6433" database connection: failed to open replica 2`,
},
expectedErr: "failed to open primary database connection: primary connection failed",
},
}
......@@ -596,16 +575,12 @@ func TestNewDBLoadBalancer_WithServiceDiscovery_ConnectionError(t *testing.T) {
datastore.WithConnector(mockConnector),
datastore.WithServiceDiscovery(mockResolver),
)
require.Error(t, err)
require.Nil(t, lb)
var errs *multierror.Error
require.ErrorAs(t, err, &errs)
require.NotNil(t, errs)
require.Len(t, errs.Errors, len(tt.expectedErrors))
for _, expectedErr := range tt.expectedErrors {
require.Contains(t, errs.Error(), expectedErr)
if tt.expectedErr != "" {
require.Nil(t, lb)
require.ErrorContains(t, err, tt.expectedErr)
} else {
require.NotNil(t, lb)
require.NoError(t, err)
}
})
}
......@@ -962,11 +937,7 @@ func TestNewDBLoadBalancer_MetricsCollection_Replicas(t *testing.T) {
}
_, err := datastore.NewDBLoadBalancer(ctx, primaryDSN, options...)
if tt.openReplica1Succeeds && tt.openReplica2Succeeds {
require.NoError(t, err)
} else {
require.Error(t, err)
}
require.NoError(t, err)
// Verify registered metrics
metricCount, err := testutil.GatherAndCount(reg)
......