...
 
Commits (14)
......@@ -28,6 +28,7 @@ const Api = {
branchSinglePath: '/api/:version/projects/:id/repository/branches/:branch',
createBranchPath: '/api/:version/projects/:id/repository/branches',
geoNodesPath: '/api/:version/geo_nodes',
subscriptionPath: '/api/:version/namespaces/:id/gitlab_subscription',
group(groupId, callback) {
const url = Api.buildUrl(Api.groupPath).replace(':id', groupId);
......@@ -322,6 +323,12 @@ const Api = {
});
},
userSubscription(namespaceId) {
const url = Api.buildUrl(this.subscriptionPath).replace(':id', encodeURIComponent(namespaceId));
return axios.get(url);
},
buildUrl(url) {
let urlRoot = '';
if (gon.relative_url_root != null) {
......
@import 'framework/variables';
@import 'framework/variables_overrides';
@import 'framework/mixins';
......
# frozen_string_literal: true
# The PoolRepository model is the database equivalent of an ObjectPool for Gitaly
# That is; PoolRepository is the record in the database, ObjectPool is the
# repository on disk
class PoolRepository < ActiveRecord::Base
include Shardable
include AfterCommitQueue
has_one :source_project, class_name: 'Project'
validates :source_project, presence: true
has_many :member_projects, class_name: 'Project'
after_create :correct_disk_path
state_machine :state, initial: :none do
  state :scheduled
  state :ready
  state :failed

  event :schedule do
    transition none: :scheduled
  end

  event :mark_ready do
    transition [:scheduled, :failed] => :ready
  end

  event :mark_failed do
    transition all => :failed
  end

  # A pool can only be joined once the object pool exists on disk,
  # i.e. once the record has reached the :ready state.
  state all - [:ready] do
    def joinable?
      false
    end
  end

  state :ready do
    def joinable?
      true
    end
  end

  # Kick off creation of the on-disk object pool once scheduling is
  # persisted; run_after_commit ensures the worker sees the committed row.
  after_transition none: :scheduled do |pool, _|
    pool.run_after_commit do
      ::ObjectPool::CreateWorker.perform_async(pool.id)
    end
  end

  # Once the pool repository exists, fan out jobs that link the member
  # projects to it.
  after_transition scheduled: :ready do |pool, _|
    pool.run_after_commit do
      ::ObjectPool::ScheduleJoinWorker.perform_async(pool.id)
    end
  end
end
# Creates the object pool repository on disk via Gitaly.
def create_object_pool
  object_pool.create
end

# The members of the pool should have fetched the missing objects to their own
# objects directory. If the caller fails to do so, data loss might occur
def delete_object_pool
  object_pool.delete
end

# Links the given repository to this pool's object database so its objects
# can be deduplicated.
def link_repository(repository)
  object_pool.link(repository.raw)
end

# This RPC can cause data loss, as not all objects are present in the local repository
# No execution path yet, will be added through:
# https://gitlab.com/gitlab-org/gitaly/issues/1415
def delete_repository_alternate(repository)
  object_pool.unlink_repository(repository.raw)
end
# Memoized handle to the Gitaly object pool backing this record.
# The on-disk path is the pool's disk_path with a '.git' suffix.
def object_pool
  @object_pool ||= Gitlab::Git::ObjectPool.new(
    shard.name,
    disk_path + '.git', # was `disk_path. + '.git'` — stray dot removed
    source_project.repository.raw)
end
private
def correct_disk_path
......
......@@ -1584,6 +1584,7 @@ class Project < ActiveRecord::Base
import_state.remove_jid
update_project_counter_caches
after_create_default_branch
join_pool_repository
refresh_markdown_cache!
end
......@@ -1980,8 +1981,48 @@ class Project < ActiveRecord::Base
Gitlab::CurrentSettings.max_attachment_size.megabytes.to_i
end
# Parameters passed along when forking this project: points the new fork at
# this project's pool repository (creating one on demand) so Git objects can
# be deduplicated. Empty hash when pooling does not apply.
def object_pool_params
  return {} if forked? || !git_objects_poolable?

  {
    repository_storage: repository_storage,
    pool_repository: pool_repository || create_new_pool_repository
  }
end
# Git objects are only poolable when the project is or has:
# - Hashed storage -> The object pool will have a remote to its members, using relative paths.
#   If the repository path changes we would have to update the remote.
# - Public -> User will be able to fetch Git objects that might not exist
#   in their own repository.
# - Repository -> Else the disk path will be empty, and there's nothing to pool
def git_objects_poolable?
  return false unless hashed_storage?(:repository)
  return false unless public?
  return false unless repository_exists?
  return false unless Gitlab::CurrentSettings.hashed_storage_enabled

  Feature.enabled?(:object_pools, self)
end
private
# Creates (or finds) the pool repository for this project's shard and
# schedules creation of the on-disk object pool.
#
# A concurrent caller may win the insert race, raising
# ActiveRecord::RecordNotUnique; retrying re-runs create_or_find, which then
# returns the winning record.
def create_new_pool_repository
  pool = begin
           create_or_find_pool_repository!(shard: Shard.by_name(repository_storage), source_project: self)
         rescue ActiveRecord::RecordNotUnique
           retry
         end

  pool.schedule
  pool
end
# Enqueues a background job linking this project's repository to its pool
# repository; no-op when no pool is assigned.
def join_pool_repository
  return if pool_repository.nil?

  ObjectPool::JoinWorker.perform_async(pool_repository.id, self.id)
end
def use_hashed_storage
if self.new_record? && Gitlab::CurrentSettings.hashed_storage_enabled
self.storage_version = LATEST_STORAGE_VERSION
......
......@@ -18,7 +18,9 @@ class Shard < ActiveRecord::Base
end
# Looks up the shard by name, creating it on first use.
#
# find_or_create_by is racy: two concurrent callers can both miss the find
# and attempt the insert. The nested transaction keeps the resulting
# RecordNotUnique from poisoning an enclosing transaction, and the
# rescue/retry re-runs the lookup, which then finds the winner's row.
# (A stray duplicated find_or_create_by call before the transaction was
# removed — its result was discarded and it issued a redundant query.)
def self.by_name(name)
  transaction(requires_new: true) do
    find_or_create_by(name: name)
  end
rescue ActiveRecord::RecordNotUnique
  retry
end
......
......@@ -54,6 +54,8 @@ module Projects
new_params[:avatar] = @project.avatar
end
new_params.merge!(@project.object_pool_params)
new_project = CreateService.new(current_user, new_params).execute
return new_project unless new_project.persisted?
......
......@@ -10,7 +10,6 @@
- cronjob:prune_old_events
- cronjob:remove_expired_group_links
- cronjob:remove_expired_members
- cronjob:remove_old_web_hook_logs
- cronjob:remove_unreferenced_lfs_objects
- cronjob:repository_archive_cache
- cronjob:repository_check_dispatch
......@@ -86,6 +85,10 @@
- todos_destroyer:todos_destroyer_project_private
- todos_destroyer:todos_destroyer_private_features
- object_pool:object_pool_create
- object_pool:object_pool_schedule_join
- object_pool:object_pool_join
- default
- mailers # ActionMailer::DeliveryJob.queue_name
......
# frozen_string_literal: true
##
# Concern for setting Sidekiq settings for the various ObjectPool queues
#
module ObjectPoolQueue
  extend ActiveSupport::Concern

  included do
    # Routes every including worker to a queue under the `object_pool` namespace.
    queue_namespace :object_pool
  end
end
......@@ -28,6 +28,8 @@ class GitGarbageCollectWorker
# Refresh the branch cache in case garbage collection caused a ref lookup to fail
flush_ref_caches(project) if task == :gc
project.repository.expire_statistics_caches
# In case pack files are deleted, release libgit2 cache and open file
# descriptors ASAP instead of waiting for Ruby garbage collection
project.cleanup
......
# frozen_string_literal: true
module ObjectPool
  # Creates the on-disk object pool for a PoolRepository record. An exclusive
  # lease guards against concurrent creation attempts for the same pool.
  class CreateWorker
    include ApplicationWorker
    include ObjectPoolQueue
    include ExclusiveLeaseGuard

    attr_reader :pool

    def perform(pool_id)
      @pool = PoolRepository.find_by_id(pool_id)
      return unless pool

      try_obtain_lease do
        perform_pool_creation
      end
    end

    private

    def perform_pool_creation
      return unless pool.failed? || pool.scheduled?

      # If this is a retry and the previous execution failed, deletion will
      # bring the pool back to a pristine state
      pool.delete_object_pool if pool.failed?

      pool.create_object_pool
      pool.mark_ready
    rescue => e
      pool.mark_failed
      raise e
    end

    def lease_key
      "object_pool:create:#{pool.id}"
    end

    # Removed a redundant second `private` marker that appeared here.
    def lease_timeout
      1.hour
    end
  end
end
# frozen_string_literal: true
module ObjectPool
  # Links a member project's repository to its object pool (once the pool is
  # joinable) and runs housekeeping so the repository gets repacked against it.
  class JoinWorker
    include ApplicationWorker
    include ObjectPoolQueue

    def perform(pool_id, project_id)
      pool_repository = PoolRepository.find_by_id(pool_id)
      return unless pool_repository&.joinable?

      member = Project.find_by_id(project_id)
      return if member.nil?

      pool_repository.link_repository(member.repository)

      Projects::HousekeepingService.new(member).execute
    end
  end
end
# frozen_string_literal: true
module ObjectPool
  # Fans out a JoinWorker job for every member project of a joinable pool.
  class ScheduleJoinWorker
    include ApplicationWorker
    include ObjectPoolQueue

    def perform(pool_id)
      pool = PoolRepository.find_by_id(pool_id)
      return unless pool&.joinable?

      pool.member_projects.find_each do |member|
        # Forks still importing don't have their repository contents yet;
        # they can join the pool after the import completes.
        next if member.forked? && !member.import_finished?

        ObjectPool::JoinWorker.perform_async(pool.id, member.id)
      end
    end
  end
end
# frozen_string_literal: true
# Cron worker that prunes old web hook logs.
class RemoveOldWebHookLogsWorker
  include ApplicationWorker
  include CronjobQueue

  # Logs are documented to be retained for 90 days; the previous value of
  # 2.days deleted them far too early (see the 'Remove old webhook logs after
  # 90 days, as documented, instead of after 2' changelog entry).
  WEB_HOOK_LOG_LIFETIME = 90.days

  # rubocop: disable DestroyAll
  def perform
    WebHookLog.destroy_all(['created_at < ?', Time.now - WEB_HOOK_LOG_LIFETIME])
  end
  # rubocop: enable DestroyAll
end
---
title: Remove old webhook logs after 90 days, as documented, instead of after 2
merge_request:
author:
type: fixed
---
title: Allow public forks to be deduplicated
merge_request: 23508
author:
type: added
......@@ -373,10 +373,6 @@ Settings.cron_jobs['clear_shared_runners_minutes_worker'] ||= Settingslogic.new(
Settings.cron_jobs['clear_shared_runners_minutes_worker']['cron'] ||= '0 0 1 * *'
Settings.cron_jobs['clear_shared_runners_minutes_worker']['job_class'] = 'ClearSharedRunnersMinutesWorker'
Settings.cron_jobs['remove_old_web_hook_logs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['remove_old_web_hook_logs_worker']['cron'] ||= '40 0 * * *'
Settings.cron_jobs['remove_old_web_hook_logs_worker']['job_class'] = 'RemoveOldWebHookLogsWorker'
Settings.cron_jobs['stuck_merge_jobs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['stuck_merge_jobs_worker']['cron'] ||= '0 */2 * * *'
Settings.cron_jobs['stuck_merge_jobs_worker']['job_class'] = 'StuckMergeJobsWorker'
......@@ -393,6 +389,10 @@ Settings.cron_jobs['prune_web_hook_logs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['prune_web_hook_logs_worker']['cron'] ||= '0 */1 * * *'
Settings.cron_jobs['prune_web_hook_logs_worker']['job_class'] = 'PruneWebHookLogsWorker'
Settings.cron_jobs['update_max_seats_used_for_gitlab_com_subscriptions_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['update_max_seats_used_for_gitlab_com_subscriptions_worker']['cron'] ||= '0 12 * * *'
Settings.cron_jobs['update_max_seats_used_for_gitlab_com_subscriptions_worker']['job_class'] = 'UpdateMaxSeatsUsedForGitlabComSubscriptionsWorker'
#
# Sidekiq
#
......
......@@ -81,6 +81,7 @@
- [delete_diff_files, 1]
- [detect_repository_languages, 1]
- [auto_devops, 2]
- [object_pool, 1]
- [repository_cleanup, 1]
- [delete_stored_files, 1]
......
# frozen_string_literal: true
# Adds state tracking and a source project reference to pool repositories so
# a pool can move through its lifecycle (scheduled/ready/failed) and be tied
# to the project it was seeded from.
class AddStateToPoolRepository < ActiveRecord::Migration[5.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  # Given the table is empty, and the non concurrent methods are chosen so
  # the transactions don't have to be disabled
  # rubocop: disable Migration/AddConcurrentForeignKey, Migration/AddIndex
  def change
    # Nullable: rows start without a state until the state machine sets one.
    add_column(:pool_repositories, :state, :string, null: true)

    add_column :pool_repositories, :source_project_id, :integer
    # A project can seed at most one pool, hence the unique index.
    add_index :pool_repositories, :source_project_id, unique: true
    add_foreign_key :pool_repositories, :projects, column: :source_project_id, on_delete: :nullify
  end
  # rubocop: enable Migration/AddConcurrentForeignKey, Migration/AddIndex
end
......@@ -10,7 +10,7 @@
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20181204135932) do
ActiveRecord::Schema.define(version: 20181206121340) do
# These are extensions that must be enabled in order to support this database
enable_extension "plpgsql"
......@@ -1307,6 +1307,21 @@ ActiveRecord::Schema.define(version: 20181204135932) do
t.index ["upload_id"], name: "index_geo_upload_deleted_events_on_upload_id", using: :btree
end
create_table "gitlab_subscriptions", id: :bigserial, force: :cascade do |t|
t.datetime_with_timezone "created_at", null: false
t.datetime_with_timezone "updated_at", null: false
t.date "start_date"
t.date "end_date"
t.date "trial_ends_on"
t.integer "namespace_id"
t.integer "hosted_plan_id"
t.integer "max_seats_used", default: 0
t.integer "seats", default: 0
t.boolean "trial", default: false
t.index ["hosted_plan_id"], name: "index_gitlab_subscriptions_on_hosted_plan_id", using: :btree
t.index ["namespace_id"], name: "index_gitlab_subscriptions_on_namespace_id", unique: true, using: :btree
end
create_table "gpg_key_subkeys", force: :cascade do |t|
t.integer "gpg_key_id", null: false
t.binary "keyid"
......@@ -2054,8 +2069,11 @@ ActiveRecord::Schema.define(version: 20181204135932) do
create_table "pool_repositories", id: :bigserial, force: :cascade do |t|
t.integer "shard_id", null: false
t.string "disk_path"
t.string "state"
t.integer "source_project_id"
t.index ["disk_path"], name: "index_pool_repositories_on_disk_path", unique: true, using: :btree
t.index ["shard_id"], name: "index_pool_repositories_on_shard_id", using: :btree
t.index ["source_project_id"], name: "index_pool_repositories_on_source_project_id", unique: true, using: :btree
end
create_table "programming_languages", force: :cascade do |t|
......@@ -3215,6 +3233,8 @@ ActiveRecord::Schema.define(version: 20181204135932) do
add_foreign_key "geo_repository_renamed_events", "projects", on_delete: :cascade
add_foreign_key "geo_repository_updated_events", "projects", on_delete: :cascade
add_foreign_key "geo_reset_checksum_events", "projects", on_delete: :cascade
add_foreign_key "gitlab_subscriptions", "namespaces"
add_foreign_key "gitlab_subscriptions", "plans", column: "hosted_plan_id", name: "fk_bd0c4019c3", on_delete: :cascade
add_foreign_key "gpg_key_subkeys", "gpg_keys", on_delete: :cascade
add_foreign_key "gpg_keys", "users", on_delete: :cascade
add_foreign_key "gpg_signatures", "gpg_key_subkeys", on_delete: :nullify
......@@ -3286,6 +3306,7 @@ ActiveRecord::Schema.define(version: 20181204135932) do
add_foreign_key "path_locks", "projects", name: "fk_5265c98f24", on_delete: :cascade
add_foreign_key "path_locks", "users"
add_foreign_key "personal_access_tokens", "users"
add_foreign_key "pool_repositories", "projects", column: "source_project_id", on_delete: :nullify
add_foreign_key "pool_repositories", "shards", on_delete: :restrict
add_foreign_key "project_authorizations", "projects", on_delete: :cascade
add_foreign_key "project_authorizations", "users", on_delete: :cascade
......
......@@ -231,6 +231,7 @@ This list of limitations only reflects the latest version of GitLab. If you are
- The installation takes multiple manual steps that together can take about an hour depending on circumstances. We are working on improving this experience. See [gitlab-org/omnibus-gitlab#2978](https://gitlab.com/gitlab-org/omnibus-gitlab/issues/2978) for details.
- Real-time updates of issues/merge requests (for example, via long polling) doesn't work on the **secondary** node.
- [Selective synchronization](configuration.md#selective-synchronization) applies only to files and repositories. Other datasets are replicated to the **secondary** node in full, making it inappropriate for use as an access control mechanism.
- Object pools for forked project deduplication work only on the **primary** node, and are duplicated on the **secondary** node.
### Limitations on replication
......
......@@ -93,6 +93,23 @@ need to be performed on these nodes as well. Database changes will propagate wit
You must make sure the migration event was already processed or otherwise it may migrate
the files back to Hashed state again.
#### Hashed object pools
For deduplication of public forks and their parent repository, objects are pooled
in an object pool. These object pools are a third repository where shared objects
are stored.
```ruby
# object pool paths
"@pools/#{hash[0..1]}/#{hash[2..3]}/#{hash}.git"
```
The object pool feature is behind the `object_pools` feature flag, and can be
enabled for individual projects by executing
`Feature.enable(:object_pools, Project.find(<id>))`. Note that the project has to
be on hashed storage, should not be a fork itself, and hashed storage should be
enabled for all new projects.
##### Attachments
To rollback single Attachment migration, rename `aa/bb/abcdef1234567890...` folder back to `namespace/project`.
......
......@@ -74,6 +74,7 @@ learn how to leverage its potential even more.
- [Caching dependencies](caching/index.md)
- [Git submodules](git_submodules.md) - How to run your CI jobs when Git
submodules are involved
- [Pipelines for merge requests](merge_request_pipelines/index.md)
- [Use SSH keys in your build environment](ssh_keys/README.md)
and status of each CI environment running on Kubernetes
- [Trigger pipelines through the GitLab API](triggers/README.md)
......
# Pipelines for merge requests
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ce/issues/15310) in GitLab 11.6
Usually, when a developer creates a new merge request, a pipeline runs on the
new change and checks if it's qualified to be merged into a target branch. This
pipeline should contain only necessary jobs for checking the new changes.
For example, unit tests, lint checks, and Review Apps are often used in this cycle.
With pipelines for merge requests, you can design a specific pipeline structure
for merge requests. All you need to do is add `only: [merge_requests]` to
the jobs that you want to run only for merge requests.
Every time developers create or update merge requests, a pipeline runs on
their new commits at every push to GitLab.
NOTE: **Note**:
If you use both this feature and the [Merge When Pipeline Succeeds](../../user/project/merge_requests/merge_when_pipeline_succeeds.md)
feature, pipelines for merge requests take precedence over other regular pipelines.
For example, consider a GitLab CI/CD configuration in .gitlab-ci.yml as follows:
```yaml
build:
stage: build
script: ./build
only:
- branches
- tags
- merge_requests
test:
stage: test
script: ./test
only:
- merge_requests
deploy:
stage: deploy
script: ./deploy
```
After a developer updates the code in a merge request (e.g. with `git push`),
GitLab detects that the code has changed and creates a new pipeline for the merge request.
The pipeline fetches the latest code from the source branch and runs tests against it.
In this example, the pipeline contains only `build` and `test` jobs.
Since `deploy` job does not have the `only: [merge_requests]` rule,
deployment jobs will not happen in the merge request.
Consider this pipeline list viewed from the **Pipelines** tab in a merge request:
![Merge request page](img/merge_request.png)
Note that pipelines tagged as **merge request** indicate that they were triggered
when a merge request was created or updated.
The same tag is shown on the pipeline's details:
![Pipeline's details](img/pipeline_detail.png)
## Important notes about merge requests from forked projects
Note that the current behavior is subject to change. In the usual contribution
flow, external contributors follow the following steps:
1. Fork a parent project.
1. Create a merge request from the forked project that targets the `master` branch
in the parent project.
1. A pipeline runs on the merge request.
1. A maintainer from the parent project checks the pipeline result, and merges
   it into the target branch if the latest pipeline has passed.
Currently, those pipelines are created in a **forked** project, not in the
parent project. This means you cannot completely trust the pipeline result,
because, technically, external contributors can disguise their pipeline results
by tweaking their GitLab Runner in the forked project.
There are multiple reasons why GitLab doesn't allow those pipelines to be
created in the parent project, but one of the biggest is security.
External users could steal secret variables from the parent project by modifying
.gitlab-ci.yml, which could be some sort of credentials. This should not happen.
We're discussing a secure solution for running pipelines for merge requests
submitted from forked projects;
see [the issue about the permission extension](https://gitlab.com/gitlab-org/gitlab-ce/issues/23902).
This diff is collapsed.
......@@ -342,15 +342,16 @@ In addition, `only` and `except` allow the use of special keywords:
| **Value** | **Description** |
| --------- | ---------------- |
| `branches` | When a branch is pushed. |
| `tags` | When a tag is pushed. |
| `api` | When pipeline has been triggered by a second pipelines API (not triggers API). |
| `external` | When using CI services other than GitLab. |
| `pipelines` | For multi-project triggers, created using the API with `CI_JOB_TOKEN`. |
| `pushes` | Pipeline is triggered by a `git push` by the user. |
| `schedules` | For [scheduled pipelines][schedules]. |
| `triggers` | For pipelines created using a trigger token. |
| `web` | For pipelines created using **Run pipeline** button in GitLab UI (under your project's **Pipelines**). |
| `branches` | When a git reference of a pipeline is a branch. |
| `tags` | When a git reference of a pipeline is a tag. |
| `api` | When pipeline has been triggered by a second pipelines API (not triggers API). |
| `external` | When using CI services other than GitLab. |
| `pipelines` | For multi-project triggers, created using the API with `CI_JOB_TOKEN`. |
| `pushes` | Pipeline is triggered by a `git push` by the user. |
| `schedules` | For [scheduled pipelines][schedules]. |
| `triggers` | For pipelines created using a trigger token. |
| `web` | For pipelines created using **Run pipeline** button in GitLab UI (under your project's **Pipelines**). |
| `merge_requests` | When a merge request is created or updated (See [pipelines for merge requests](../merge_request_pipelines/index.md)). |
In the example below, `job` will run only for refs that start with `issue-`,
whereas all branches will be skipped:
......@@ -391,6 +392,24 @@ job:
The above example will run `job` for all branches on `gitlab-org/gitlab-ce`,
except master.
If a job has neither an `only` nor an `except` rule,
`only: ['branches', 'tags']` is set by default.
For example,
```yaml
job:
script: echo 'test'
```
is translated to
```yaml
job:
script: echo 'test'
only: ['branches', 'tags']
```
## `only` and `except` (complex)
> `refs` and `kubernetes` policies introduced in GitLab 10.0
......
......@@ -372,6 +372,16 @@ all your changes will be available to preview by anyone with the Review Apps lin
[Read more about Review Apps.](../../../ci/review_apps/index.md)
## Pipelines for merge requests
When a developer updates a merge request, a pipeline should quickly report its
result back to the developer, but pipelines often take a long time to complete
because general branch pipelines contain jobs that are unnecessary from the merge request standpoint.
You can customize a specific pipeline structure for merge requests in order to
speed the cycle up by running only important jobs.
Learn more about [pipelines for merge requests](../../../ci/merge_request_pipelines/index.md).
## Pipeline status in merge requests
If you've set up [GitLab CI/CD](../../../ci/README.md) in your project,
......
......@@ -119,31 +119,6 @@ on the home page of your project.
If you have a self-hosted GitLab installation, replace `gitlab.com` with your
domain name.
## Instance level Maven endpoint
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/8274) in GitLab Premium 11.6.
If you rely on many packages, it might be inefficient to include the `repository` section
with a unique URL for each package. Instead, you can use the instance level endpoint for
all maven packages stored in GitLab. Only packages you have access to
will be available for download. Here's how the relevant `repository` section of
your `pom.xml` would look like:
```xml
<repositories>
<repository>
<id>gitlab-maven</id>
<url>https://gitlab.com/api/v4/packages/maven</url>
</repository>
</repositories>
```
If you have a self-hosted GitLab installation, replace `gitlab.com` with your
domain name.
You still need a project specific URL for uploading a package
in the `distributionManagement` section.
## Uploading packages
Once you have set up the [authorization](#authorizing-with-the-gitlab-maven-repository)
......
<script>
import { mapActions } from 'vuex';
import SubscriptionTable from './subscription_table.vue';
// Root component of the billing page: stores the namespace id in the Vuex
// `subscription` module, then renders the subscription table.
export default {
  name: 'SubscriptionApp',
  components: {
    SubscriptionTable,
  },
  props: {
    // Namespace whose GitLab.com subscription is shown; arrives as a string
    // from the mount point's data attribute and may be absent.
    namespaceId: {
      type: String,
      required: false,
      default: null,
    },
  },
  created() {
    // Push the namespace id into the store before child components mount.
    this.setNamespaceId(this.namespaceId);
  },
  methods: {
    ...mapActions('subscription', ['setNamespaceId']),
  },
};
</script>
<template>
<subscription-table />
</template>
<script>
import _ from 'underscore';
import { mapActions, mapState, mapGetters } from 'vuex';
import { GlLoadingIcon } from '@gitlab/ui';
import SubscriptionTableRow from './subscription_table_row.vue';
import { CUSTOMER_PORTAL_URL } from '../constants';
import { s__, sprintf } from '~/locale';
// Renders the subscription overview card; fetches the subscription on mount
// and shows a loading spinner until data (or an error) arrives.
export default {
  name: 'SubscriptionTable',
  components: {
    SubscriptionTableRow,
    GlLoadingIcon,
  },
  computed: {
    ...mapState('subscription', ['isLoading', 'hasError', 'plan', 'rows', 'endpoint']),
    ...mapGetters('subscription', ['isFreePlan']),
    // Card header, e.g. "GitLab.com Gold subscription - Trial".
    // Free plans show just "GitLab.com Free" (empty suffix).
    subscriptionHeader() {
      let suffix = this.isFreePlan ? '' : s__('SubscriptionTable|subscription');

      if (!this.isFreePlan && this.plan.trial) {
        suffix += ` - ${s__('SubscriptionTable|Trial')}`;
      }

      return sprintf(s__('SubscriptionTable|GitLab.com %{planName} %{suffix}'), {
        // The plan name comes from the API, so escape it before interpolating.
        planName: this.isFreePlan ? s__('SubscriptionTable|Free') : _.escape(this.plan.name),
        suffix,
      });
    },
    actionButtonText() {
      return this.isFreePlan ? s__('SubscriptionTable|Upgrade') : s__('SubscriptionTable|Manage');
    },
  },
  mounted() {
    this.fetchSubscription();
  },
  methods: {
    ...mapActions('subscription', ['fetchSubscription']),
  },
  // Static (non-reactive) option, accessed as $options.customerPortalUrl.
  customerPortalUrl: CUSTOMER_PORTAL_URL,
};
</script>
<template>
<div>
<div
v-if="!isLoading && !hasError"
class="card prepend-top-default subscription-table js-subscription-table"
>
<div class="js-subscription-header card-header">
<strong> {{ subscriptionHeader }} </strong>
<div class="controls">
<a
:href="$options.customerPortalUrl"
target="_blank"
rel="noopener noreferrer"
class="btn btn-inverted-secondary"
>
{{ actionButtonText }}
</a>
</div>
</div>
<div class="card-body flex-grid d-flex flex-column flex-sm-row flex-md-row flex-lg-column">
<subscription-table-row
v-for="(row, i) in rows"
:key="`subscription-rows-${i}`"
:header="row.header"
:columns="row.columns"
:is-free-plan="isFreePlan"
/>
</div>
</div>
<gl-loading-icon
v-else-if="isLoading && !hasError"
:label="s__('SubscriptionTable|Loading subscriptions')"
:size="3"
class="prepend-top-10 append-bottom-10"
/>
</div>
</template>
<script>
import { dateInWords } from '~/lib/utils/datetime_utility';
import Icon from '~/vue_shared/components/icon.vue';
import Popover from '~/vue_shared/components/help_popover.vue';
// One row of the subscription table: a header cell followed by value columns.
export default {
  name: 'SubscriptionTableRow',
  components: {
    Icon,
    Popover,
  },
  props: {
    // Row descriptor: { icon, title }.
    header: {
      type: Object,
      required: true,
    },
    // Column descriptors: { id, label, value, isDate?, colClass?, popover?, hidden? }.
    columns: {
      type: Array,
      required: true,
    },
    isFreePlan: {
      type: Boolean,
      required: false,
      default: false,
    },
  },
  methods: {
    // Per-column popover options merged over the shared defaults.
    getPopoverOptions(col) {
      const defaults = {
        placement: 'bottom',
      };
      return { ...defaults, ...col.popover };
    },
    // Formats a column value for display: dates in words, '-' for missing
    // values (and for 0 on the Free plan).
    getDisplayValue(col) {
      if (col.isDate && col.value) {
        return dateInWords(new Date(col.value));
      }
      // let's display '-' instead of 0 for the 'Free' plan
      if (this.isFreePlan && col.value === 0) {
        return ' - ';
      }
      return typeof col.value !== 'undefined' && col.value !== null ? col.value : ' - ';
    },
  },
};
</script>
<template>
<div class="grid-row d-flex flex-grow-1 flex-column flex-sm-column flex-md-column flex-lg-row">
<div class="grid-cell header-cell">
<span class="icon-wrapper">
<icon v-if="header.icon" class="append-right-8" :name="header.icon" aria-hidden="true" />
{{ header.title }}
</span>
</div>
<template v-for="(col, i) in columns">
<div :key="`subscription-col-${i}`" class="grid-cell" :class="[col.hidden ? 'no-value' : '']">
<span class="property-label"> {{ col.label }} </span>
<popover v-if="col.popover" :options="getPopoverOptions(col)" />
<p
class="property-value prepend-top-5 append-bottom-0"
:class="[col.colClass ? col.colClass : '']"
>
{{ getDisplayValue(col) }}
</p>
</div>
</template>
</div>
</template>
// Indices into the subscription store's `rows` state array.
export const USAGE_ROW_INDEX = 0;
export const BILLING_ROW_INDEX = 1;
// External customers portal where subscriptions are upgraded/managed.
export const CUSTOMER_PORTAL_URL = 'https://customers.gitlab.com/subscriptions';
import Vue from 'vue';
import SubscriptionApp from './components/app.vue';
import store from './stores';
// Mounts the subscription app on the element with the given id. Returns the
// Vue instance, or false when the container is not on the page.
export default (containerId = 'js-billing-plans') => {
  const containerEl = document.getElementById(containerId);

  if (!containerEl) {
    return false;
  }

  return new Vue({
    el: containerEl,
    store,
    components: {
      SubscriptionApp,
    },
    data() {
      // The namespace id is passed through the container's data attribute.
      const { dataset } = this.$options.el;
      const { namespaceId } = dataset;

      return {
        namespaceId,
      };
    },
    render(createElement) {
      return createElement('subscription-app', {
        props: {
          namespaceId: this.namespaceId,
        },
      });
    },
  });
};
import Vue from 'vue';
import Vuex from 'vuex';
import subscription from './modules/subscription/index';
Vue.use(Vuex);
export default () =>
new Vuex.Store({
modules: {
subscription,
},
});
import * as types from './mutation_types';
import createFlash from '~/flash';
import { __ } from '~/locale';
import API from '~/api';
/**
* SUBSCRIPTION TABLE
*/
// Stores the namespace whose subscription the table should fetch.
export const setNamespaceId = ({ commit }, namespaceId) => {
  commit(types.SET_NAMESPACE_ID, namespaceId);
};

// Requests the subscription for the stored namespace, then dispatches the
// success/error follow-up action based on the API result.
export const fetchSubscription = ({ dispatch, state }) => {
  dispatch('requestSubscription');

  return API.userSubscription(state.namespaceId)
    .then(({ data }) => dispatch('receiveSubscriptionSuccess', data))
    .catch(() => dispatch('receiveSubscriptionError'));
};

export const requestSubscription = ({ commit }) => commit(types.REQUEST_SUBSCRIPTION);

export const receiveSubscriptionSuccess = ({ commit }, response) =>
  commit(types.RECEIVE_SUBSCRIPTION_SUCCESS, response);

// Shows a flash message and flags the error in state.
export const receiveSubscriptionError = ({ commit }) => {
  createFlash(__('An error occurred while loading the subscription details.'));
  commit(types.RECEIVE_SUBSCRIPTION_ERROR);
};

// prevent babel-plugin-rewire from generating an invalid default during karma tests
export default () => {};
// A namespace without a paid plan has `plan.code === null` (the default state).
export const isFreePlan = state => state.plan.code === null;

// prevent babel-plugin-rewire from generating an invalid default during karma tests
export default () => {};
import * as actions from './actions';
import * as getters from './getters';
import mutations from './mutations';
import state from './state';
export default {
namespaced: true,
actions,
mutations,
getters,
state,
};
{
"plan": {
"name": "Gold",
"code": "gold",
"trial": false
},
"usage": {
"seats_in_subscription": 100,
"seats_in_use": 98,
"max_seats_used": 104,
"seats_owed": 4
},
"billing": {
"subscription_start_date": "2018-07-11",
"subscription_end_date": "2019-07-11",
"last_invoice": "2018-09-01",
"next_invoice": "2018-10-01"
}
}
export const SET_NAMESPACE_ID = 'SET_NAMESPACE_ID';
export const REQUEST_SUBSCRIPTION = 'REQUEST_SUBSCRIPTION';
export const RECEIVE_SUBSCRIPTION_SUCCESS = 'RECEIVE_SUBSCRIPTION_SUCCESS';
export const RECEIVE_SUBSCRIPTION_ERROR = 'RECEIVE_SUBSCRIPTION_ERROR';
import Vue from 'vue';
import * as types from './mutation_types';
import { USAGE_ROW_INDEX, BILLING_ROW_INDEX } from '../../../constants';
import { convertObjectPropsToCamelCase } from '~/lib/utils/common_utils';
export default {
  // Store the namespace whose subscription will be fetched.
  [types.SET_NAMESPACE_ID](state, payload) {
    state.namespaceId = payload;
  },

  // Enter the loading state and clear any previous error.
  [types.REQUEST_SUBSCRIPTION](state) {
    state.isLoading = true;
    state.hasError = false;
  },

  [types.RECEIVE_SUBSCRIPTION_SUCCESS](state, payload) {
    // The API payload is snake_case; column ids in state are camelCase.
    const data = convertObjectPropsToCamelCase(payload, { deep: true });
    const { plan, usage, billing } = data;

    state.plan = plan;

    /*
     * Update column values for billing and usage row.
     * We iterate over the rows within the state
     * and update only the column's value property in the state
     * with the data we received from the API for the given column
     */
    [USAGE_ROW_INDEX, BILLING_ROW_INDEX].forEach(rowIdx => {
      const currentRow = state.rows[rowIdx];
      currentRow.columns.forEach(currentCol => {
        // Vue.set keeps the newly written `value` property reactive.
        if (rowIdx === USAGE_ROW_INDEX) {
          Vue.set(currentCol, 'value', usage[currentCol.id]);
        } else if (rowIdx === BILLING_ROW_INDEX) {
          Vue.set(currentCol, 'value', billing[currentCol.id]);
        }
      });
    });

    state.isLoading = false;
  },

  // Stop loading and flag the error (the flash is shown by the action).
  [types.RECEIVE_SUBSCRIPTION_ERROR](state) {
    state.isLoading = false;
    state.hasError = true;
  },
};
import { s__ } from '~/locale';
// Factory returning the initial state for the subscription table store.
// `rows` drives the table layout; each column's `id` matches a camelCased
// key of the API payload, and `value` is filled in later by the
// RECEIVE_SUBSCRIPTION_SUCCESS mutation (null until data arrives).
export default () => ({
  isLoading: false,
  hasError: false,
  namespaceId: null,
  // Current subscription plan, populated from the API response.
  plan: {
    code: null,
    name: null,
    trial: false,
  },
  rows: [
    // Usage row: seat counts for the subscription.
    {
      header: {
        icon: 'monitor',
        title: s__('SubscriptionTable|Usage'),
      },
      columns: [
        {
          id: 'seatsInSubscription',
          label: s__('SubscriptionTable|Seats in subscription'),
          value: null,
          colClass: 'number',
        },
        {
          id: 'seatsInUse',
          label: s__('SubscriptionTable|Seats currently in use'),
          value: null,
          colClass: 'number',
          popover: {
            content: s__(`SubscriptionTable|Usage count is performed once a day at 12:00 PM.`),
          },
        },
        {
          id: 'maxSeatsUsed',
          label: s__('SubscriptionTable|Max seats used'),
          value: null,
          colClass: 'number',
          popover: {
            content: s__(
              'SubscriptionTable|This is the maximum number of users that have existed at the same time since this subscription started.',
            ),
          },
        },
        {
          id: 'seatsOwed',
          label: s__('SubscriptionTable|Seats owed'),
          value: null,
          colClass: 'number',
          popover: {
            content: s__(
              'SubscriptionTable|GitLab allows you to continue using your subscription even if you exceed the number of seats you purchased. You will be required to pay for these seats upon renewal.',
            ),
          },
        },
      ],
    },
    // Billing row: subscription and invoice dates.
    {
      header: {
        icon: 'calendar',
        title: s__('SubscriptionTable|Billing'),
      },
      columns: [
        {
          id: 'subscriptionStartDate',
          label: s__('SubscriptionTable|Subscription start date'),
          value: null,
          isDate: true,
        },
        {
          id: 'subscriptionEndDate',
          label: s__('SubscriptionTable|Subscription end date'),
          value: null,
          isDate: true,
        },
        {
          id: 'lastInvoice',
          label: s__('SubscriptionTable|Last invoice'),
          value: null,
          isDate: true,
          popover: {
            content: s__(
              'SubscriptionTable|This is the last time the GitLab.com team was in contact with you to settle any outstanding balances.',
            ),
          },
          // NOTE(review): presumably hides the cell until a value exists —
          // confirm against the component consuming `hidden`.
          hidden: true,
        },
        {
          id: 'nextInvoice',
          label: s__('SubscriptionTable|Next invoice'),
          value: null,
          isDate: true,
          popover: {
            content: s__(
              'SubscriptionTable|This is the next date when the GitLab.com team is scheduled to get in contact with you to settle any outstanding balances.',
            ),
          },
          hidden: true,
        },
      ],
    },
  ],
});
import initSubscriptions from 'ee/billings';
// Boot the subscriptions app once the DOM is ready. The wrapper arrow keeps
// the DOM event object from being forwarded to initSubscriptions.
document.addEventListener('DOMContentLoaded', () => initSubscriptions());
......@@ -154,3 +154,56 @@
}
}
}
// Styles for the subscription (billing) table, laid out as a flex grid.
.subscription-table {
  .flex-grid {
    .grid-cell {
      .property-label {
        color: $gl-text-color-secondary;
      }

      // Inline help-popover trigger next to a label.
      .btn-help {
        color: $blue-600;
      }

      .property-value {
        color: $gl-text-color;

        // Numeric cells (seat counts) are rendered larger.
        &.number {
          font-size: 20px;
          line-height: 24px;
        }
      }

      // Keeps header icons vertically aligned with their title text.
      .icon-wrapper {
        line-height: 16px;
        vertical-align: baseline;

        svg {
          vertical-align: middle;
        }
      }

      &.header-cell {
        font-weight: $gl-font-weight-bold;
      }

      // Cells without a value hide their content, and disappear entirely
      // on small screens.
      &.no-value {
        > * {
          display: none;
        }

        @include media-breakpoint-down(sm) {
          display: none;
        }
      }
    }

    // Fixed-width header column on large screens.
    @include media-breakpoint-up(lg) {
      .header-cell {
        width: 144px;
        flex: none;
      }
    }
  }
}
......@@ -48,12 +48,12 @@ class Admin::Geo::NodesController < Admin::ApplicationController
:url,
:primary,
:selective_sync_type,
:selective_sync_shards,
:namespace_ids,
:repos_max_capacity,
:files_max_capacity,
:verification_max_capacity,
:minimum_reverification_interval
:minimum_reverification_interval,
selective_sync_shards: []
)
end
......
# frozen_string_literal: true
class Packages::MavenPackageFinder
attr_reader :path, :project
attr_reader :project, :path
def initialize(path, project = nil)
@path = path
def initialize(project, path)
@project = project
@path = path
end
def execute
......@@ -17,17 +17,9 @@ class Packages::MavenPackageFinder
private
def scope
if project
project.packages
else
::Packages::Package.all
end
end
# rubocop: disable CodeReuse/ActiveRecord
def packages
scope.joins(:maven_metadatum)
project.packages.joins(:maven_metadatum)
.where(packages_maven_metadata: { path: path })
end
# rubocop: enable CodeReuse/ActiveRecord
......
......@@ -175,6 +175,18 @@ module EE
project
end
# For now, we are not billing for members with a Guest role for subscriptions
# with a Gold plan. The other plans will treat Guest members as a regular member
# for billing purposes.
override :billable_members_count
# Number of members the group is billed for. Guests are excluded when
# either the group's actual plan or the requested hosted plan is Gold;
# all other plans bill guests like regular members.
def billable_members_count(requested_hosted_plan = nil)
  gold_applies = [actual_plan_name, requested_hosted_plan].include?(Namespace::GOLD_PLAN)

  return users_with_descendants.excluding_guests.count if gold_applies

  users_with_descendants.count
end
private
def custom_project_templates_group_allowed
......
......@@ -31,6 +31,7 @@ module EE
belongs_to :plan
has_one :namespace_statistics
has_one :gitlab_subscription
scope :with_plan, -> { where.not(plan_id: nil) }
......@@ -43,6 +44,8 @@ module EE
validate :validate_plan_name
validate :validate_shared_runner_minutes_support
delegate :trial?, :trial_ends_on, to: :gitlab_subscription, allow_nil: true
before_create :sync_membership_lock_with_parent
# Changing the plan or other details may invalidate this cache
......@@ -104,11 +107,10 @@ module EE
available_features[feature]
end
# Unlike the "plan" column (which is nil / "" when the namespace has no
# plan, i.e. a "free" plan), this returns the hosted plan attached to the
# namespace's GitLab subscription, creating the subscription on demand
# (root namespaces only — see #find_or_create_subscription).
def actual_plan
  # Dropped the leftover `self.plan || Plan.find_by(name: FREE_PLAN)` line:
  # its value was discarded, so it only cost an extra query per call.
  subscription = find_or_create_subscription
  subscription&.hosted_plan
end
def actual_plan_name
......@@ -185,12 +187,20 @@ module EE
# The hosted plans applying to this namespace: for subgroups this covers
# the namespace and all of its ancestors; for root namespaces just itself.
# Memoized per instance.
def plans
  # Dropped the leftover pre-refactor lines (`Plan.where(...)` and
  # `Array(plan)`): their values were discarded dead code in each branch.
  @plans ||=
    if parent_id
      Plan.hosted_plans_for_namespaces(self_and_ancestors.select(:id))
    else
      Plan.hosted_plans_for_namespaces(self)
    end
end
# When purchasing a GitLab.com plan for a user namespace we only charge
# for a single user. Group namespaces override this method to count all
# billable members instead.
def billable_members_count(_requested_hosted_plan = nil)
  1
end
def eligible_for_trial?
::Gitlab.com? &&
parent_id.nil? &&
......@@ -199,7 +209,7 @@ module EE
end
# Whether the namespace is on a currently running trial: the subscription
# must be flagged as a trial AND have an end date that has not passed.
def trial_active?
  # Dropped the leftover line missing the `trial?` check — its value was
  # discarded, so only the trial?-guarded expression was ever returned.
  trial? && trial_ends_on.present? && trial_ends_on >= Date.today
end
def trial_expired?
......@@ -261,5 +271,21 @@ module EE
self.file_template_project_id = nil
end
# Returns the namespace's GitLab subscription, generating one when absent.
# Hosted subscriptions are only available for root groups for now, so
# subgroups (those with a parent) get nil.
def find_or_create_subscription
  return unless parent_id.nil?

  gitlab_subscription || generate_subscription
end
def generate_subscription
create_gitlab_subscription(
plan_code: plan&.name,