---
# This file is a template, and might need editing before it works on your project.
# Auto DevOps
# This CI/CD configuration provides a standard pipeline for
# * building a Docker image (using a buildpack if necessary),
# * storing the image in the container registry,
# * running tests from a buildpack,
# * running code quality analysis,
# * creating a review app for each topic branch,
# * and continuous deployment to production
#
# In order to deploy, you must have a Kubernetes cluster configured either
# via a project integration, or via group/project variables.
# AUTO_DEVOPS_DOMAIN must also be set as a variable at the group or project
# level, or manually added below.
#
# If you want to deploy to staging first, or enable canary deploys,
# uncomment the relevant jobs in the pipeline below.
#
# If Auto DevOps fails to detect the proper buildpack, or if you want to
# specify a custom buildpack, set a project variable `BUILDPACK_URL` to the
# repository URL of the buildpack.
# e.g. BUILDPACK_URL=https://github.com/heroku/heroku-buildpack-ruby.git#v142
# If you need multiple buildpacks, add a file to your project called
# `.buildpacks` that contains the URLs, one on each line, in order.
# Note: Auto CI does not work with multiple buildpacks yet

# Default image for all jobs: includes helm/kubectl tooling for chart builds.
image: registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-charts-build-base

variables:
  GOOGLE_APPLICATION_CREDENTIALS: ${CI_PROJECT_DIR}/.google_keyfile.json
  # AUTO_DEVOPS_DOMAIN is the application deployment domain and should be set as a variable at the group or project level.
  # AUTO_DEVOPS_DOMAIN: domain.example.com

  POSTGRES_USER: user
  POSTGRES_PASSWORD: testing-password
  # Quoted so the consumer receives the string "false", not a YAML boolean.
  POSTGRES_ENABLED: "false"
  POSTGRES_DB: $CI_ENVIRONMENT_SLUG

stages:
  - prepare
  - review
  - staging
  - canary
  - stable
  - specs
  - qa
  - package
  - cleanup

# Lint the chart and package it into build/ as an artifact (non-tag pipelines only).
lint_package:
  stage: package
  when: always
  script:
    - helm init --client-only
    - helm repo add gitlab https://charts.gitlab.io
    - helm dependencies update
    - helm lint --set certmanager-issuer.email=support@gitlab.com
    - mkdir -p build
    - helm package -d build .
  artifacts:
    expire_in: 3d
    paths:
    - build
  except:
    - tags

# On tags of the canonical project, trigger the downstream release pipeline.
release_package:
  stage: package
  when: always
  script:
    - curl --request POST --form "token=$CI_JOB_TOKEN" --form ref=master
        --form "variables[CHART_NAME]=$CI_PROJECT_NAME"
        --form "variables[RELEASE_REF]=$CI_COMMIT_REF_NAME"
        https://gitlab.com/api/v4/projects/2860651/trigger/pipeline
  only:
    - tags@charts/gitlab

# Deploy a review app for every topic branch; exports its URLs for later jobs.
review:
  stage: review
  script:
    - check_kube_domain
    - ensure_namespace
    - install_tiller
    - create_secret
    - install_external_dns
    # Bootstrap the operator (no --wait) when the CRD is missing or the last deploy failed.
    - if ! crdExists || previousDeployFailed ; then OPERATOR_BOOTSTRAP=true deploy ; fi
    - deploy
    - add_license
    # `variables` is consumed via `source variables` by the specs/qa jobs.
    - echo "export QA_ENVIRONMENT_URL=gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN" >> variables
    - echo "export GITLAB_ROOT_DOMAIN=$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"        >> variables
    - echo "export GITLAB_URL=gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"         >> variables
    - echo "export REGISTRY_URL=registry-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"     >> variables
    - echo "export S3_ENDPOINT=https://minio-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN" >> variables
  artifacts:
    paths:
    - variables
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  variables:
    HOST_SUFFIX: "$CI_ENVIRONMENT_SLUG"
    DOMAIN: "-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"
  only:
    refs:
      - branches
    kubernetes: active
  except:
    - master

# Manual job (also triggered by environment on_stop) that tears a review app down.
stop_review:
  stage: review
  variables:
    # The branch may be gone by the time this runs; skip the default checkout.
    GIT_CHECKOUT: "false"
  script:
    - git checkout master
    - delete
    - cleanup
  environment:
    name: review/$CI_COMMIT_REF_NAME
    action: stop
  when: manual
  allow_failure: true
  only:
    refs:
      - branches
    kubernetes: active
  except:
    - master

# Keys that start with a dot (.) will not be processed by GitLab CI.
# Staging and canary jobs are disabled by default, to enable them
# remove the dot (.) before the job name.
# https://docs.gitlab.com/ee/ci/yaml/README.html#hidden-keys

# Staging deploys are disabled by default since
# continuous deployment to production is enabled by default
# If you prefer to automatically deploy to staging and
# only manually promote to production, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.

# Deploy to a dedicated staging environment on master.
# Disabled by default; remove the leading dot to enable (see notes above).
.staging:
  stage: staging
  script:
    - check_kube_domain
    - check_domain_ip
#    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy
  environment:
    name: staging
    url: https://gitlab-staging.$AUTO_DEVOPS_DOMAIN
  variables:
    # Quoted to match the DOMAIN style of the other deploy jobs in this file.
    DOMAIN: "-staging.$AUTO_DEVOPS_DOMAIN"
  only:
    refs:
      - master
    kubernetes: active

# Canaries are disabled by default, but if you want them,
# and know what the downsides are, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.

# Deploy a canary track alongside production (manual; disabled by default —
# remove the leading dot to enable, see notes above).
.canary:
  stage: canary
  script:
    - check_kube_domain
    - check_domain_ip
#    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    # "canary" track deploys release "<slug>-canary"; deploy() disables
    # postgres for canary so it shares the stable track's database.
    - deploy canary
  environment:
    name: production
    url: https://gitlab.$AUTO_DEVOPS_DOMAIN
  variables:
    DOMAIN: ".$AUTO_DEVOPS_DOMAIN"
  when: manual
  only:
    refs:
      - master
    kubernetes: active

# This job continuously deploys to production on every push to `master`.
# To make this a manual process, either because you're enabling `staging`
# or `canary` deploys, or you simply want more control over when you deploy
# to production, uncomment the `when: manual` line in the `production` job.

# Continuous deployment to production on every push to master.
stable:
  stage: stable
  script:
    - check_kube_domain
    - check_domain_ip
    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    # Bootstrap the operator (no --wait) when the CRD is missing or the last deploy failed.
    - if ! crdExists || previousDeployFailed ; then OPERATOR_BOOTSTRAP=true deploy ; fi
    - deploy
    - delete canary
    # `variables` is consumed via `source variables` by the specs/qa jobs.
    - echo "export QA_ENVIRONMENT_URL=gitlab.$AUTO_DEVOPS_DOMAIN" >> variables
    - echo "export GITLAB_ROOT_DOMAIN=$AUTO_DEVOPS_DOMAIN"        >> variables
    - echo "export S3_ENDPOINT=https://minio.$AUTO_DEVOPS_DOMAIN" >> variables
  artifacts:
    paths:
    - variables
  environment:
    name: production
    url: https://gitlab.$AUTO_DEVOPS_DOMAIN
  variables:
    DOMAIN: ".$AUTO_DEVOPS_DOMAIN"
#  when: manual
  only:
    refs:
      - master
    kubernetes: active

# Run `helm test` against the review app deployed by the `review` job.
review_helm_test:
  stage: qa
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  script:
    - export TILLER_NAMESPACE=$KUBE_NAMESPACE
    - helm test --cleanup "$CI_ENVIRONMENT_SLUG"
  only:
    refs:
      - branches
    kubernetes: active
  except:
    refs:
      - master

# Run `helm test` against the production deployment (canonical project only).
production_helm_test:
  stage: qa
  environment:
    name: production
    url: https://gitlab.$AUTO_DEVOPS_DOMAIN
  script:
    - export TILLER_NAMESPACE=$KUBE_NAMESPACE
    - helm test --cleanup "$CI_ENVIRONMENT_SLUG"
  only:
    refs:
      - master@charts/gitlab
    kubernetes: active
# On review-pipeline failure, dump pod descriptions and resources for debugging.
debug_review:
  stage: qa
  when: on_failure
  script:
    - kubectl -n "$KUBE_NAMESPACE" describe pod
    - kubectl -n "$KUBE_NAMESPACE" get pod,jobs,secret,ing,cm,sa,svc,role,rolebinding,pvc
  artifacts:
    paths:
    - variables
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  variables:
    HOST_SUFFIX: "$CI_ENVIRONMENT_SLUG"
    DOMAIN: "-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"
  only:
    refs:
      - branches
    kubernetes: active
  except:
    - master

# Run Danger code review checks; only when an API token is configured.
danger-review:
  image: registry.gitlab.com/gitlab-org/gitlab-build-images:danger
  stage: prepare
  cache: {}
  only:
    variables:
      - $DANGER_GITLAB_API_TOKEN
  except:
    refs:
      - master
      - tags
  script:
    - git version
    - danger --fail-on-errors=true

# Build the docs site with nanoc and verify internal links and anchors.
check_docs_internal_links:
  image: "registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-docs-lint"
  stage: prepare
  cache: {}
  dependencies: []
  before_script: []
  script:
    - mv doc/ /tmp/gitlab-docs/content/charts
    - cd /tmp/gitlab-docs
    # Build HTML from Markdown
    - bundle exec nanoc
    # Check the internal links
    - bundle exec nanoc check internal_links
    # Check the internal anchor links
    - bundle exec nanoc check internal_anchors

# ---------------------------------------------------------------------------

# Shell helper library sourced into every job via the file-level before_script.
.auto_devops: &auto_devops |
  # Auto DevOps variables and functions
  [[ "$TRACE" ]] && set -x
  auto_database_url=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${CI_ENVIRONMENT_SLUG}-postgres:5432/${POSTGRES_DB}
  export DATABASE_URL=${DATABASE_URL-$auto_database_url}
  export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
  export CI_APPLICATION_TAG=$CI_COMMIT_SHA
  export CI_CONTAINER_NAME=ci_job_build_${CI_JOB_ID}
  export TILLER_NAMESPACE=$KUBE_NAMESPACE
  # Returns 0 (success) when a previous helm release exists AND is in a bad
  # state (FAILED / PENDING_UPGRADE / PENDING_INSTALL); 1 otherwise.
  function previousDeployFailed() {
    set +e
    echo "Checking for previous deployment of $CI_ENVIRONMENT_SLUG"
    # Only the exit code matters here; output is discarded.
    helm status $CI_ENVIRONMENT_SLUG >/dev/null 2>&1
    status=$?
    # if `status` is `0`, deployment exists, has a status
    if [ $status -eq 0 ]; then
      echo "Previous deployment found, checking status"
      deployment_status=$(helm status $CI_ENVIRONMENT_SLUG | grep ^STATUS | cut -d' ' -f2)
      echo "Previous deployment state: $deployment_status"
      if [[ "$deployment_status" == "FAILED" || "$deployment_status" == "PENDING_UPGRADE" || "$deployment_status" == "PENDING_INSTALL" ]]; then
        status=0;
      else
        status=1;
      fi
    else
      echo "Previous deployment NOT found."
    fi
    set -e
    return $status
  }

Jason Plum's avatar
Jason Plum committed
341 342 343 344 345 346 347 348 349 350 351 352
  function crdExists() {
    echo "Checking for existing GitLab Operator CRD"
    kubectl get crd/gitlabs.${CI_ENVIRONMENT_SLUG}.gitlab.com >/dev/null 2>&1
    status=$?
    if [ $status -eq 0 ]; then
      echo "GitLab Operator CRD exists."
    else
      echo "GitLab Operator CRD does NOT exist."
    fi
    return $status
  }

353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383
  function deploy() {
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi

    replicas="1"
    service_enabled="false"
    postgres_enabled="$POSTGRES_ENABLED"
    # canary uses stable db
    [[ "$track" == "canary" ]] && postgres_enabled="false"

    env_track=$( echo $track | tr -s  '[:lower:]'  '[:upper:]' )
    env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s  '[:lower:]'  '[:upper:]' )

    if [[ "$track" == "stable" ]]; then
      # for stable track get number of replicas from `PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_slug}_REPLICAS
      service_enabled="true"
    else
      # for all tracks get number of replicas from `CANARY_PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
    fi
    if [[ -n "$new_replicas" ]]; then
      replicas="$new_replicas"
    fi

    #ROOT_PASSWORD=$(cat /dev/urandom | LC_TYPE=C tr -dc "[:alpha:]" | head -c 16)
    #echo "Generated root login: $ROOT_PASSWORD"
384 385
    kubectl create secret generic "${CI_ENVIRONMENT_SLUG}-gitlab-initial-root-password" --from-literal=password=$ROOT_PASSWORD -o yaml --dry-run | kubectl replace --force -f -

386
    # YAML_FILE=""${AUTO_DEVOPS_DOMAIN//\./-}.yaml"
387
    # Cleanup and previous installs, as FAILED and PENDING_UPGRADE will cause errors with `upgrade`
388
    if [ "$CI_ENVIRONMENT_SLUG" != "production" ] && previousDeployFailed ; then
389 390
      echo "Deployment in bad state, cleaning up $CI_ENVIRONMENT_SLUG"
      delete
391
      cleanup
392
    fi
393
    helm repo add gitlab https://charts.gitlab.io/
394 395
    helm dep update .

Jason Plum's avatar
Jason Plum committed
396 397 398 399 400 401
    # If OPERATOR_BOOTSTRAP is set, we _do not_ want to use --wait / --timeout
    WAIT="--wait --timeout 600"
    if [ -n "${OPERATOR_BOOTSTRAP}" ]; then
      WAIT=""
    fi

402
    helm upgrade --install \
Jason Plum's avatar
Jason Plum committed
403
      $WAIT \
404
      --set releaseOverride="$CI_ENVIRONMENT_SLUG" \
405 406
      --set global.hosts.hostSuffix="$HOST_SUFFIX" \
      --set global.hosts.domain="$AUTO_DEVOPS_DOMAIN" \
407
      --set global.ingress.annotations."external-dns\.alpha\.kubernetes\.io/ttl"="10" \
408
      --set global.ingress.tls.secretName=helm-charts-win-tls \
409 410
      --set global.ingress.configureCertmanager=false \
      --set certmanager.install=false \
411 412
      --set gitlab.unicorn.maxReplicas=3 \
      --set gitlab.sidekiq.maxReplicas=2 \
413
      --set gitlab.task-runner.enabled=true \
414 415 416
      --set gitlab.gitlab-shell.maxReplicas=3 \
      --set redis.resources.requests.cpu=100m \
      --set minio.resources.requests.cpu=100m \
Jason Plum's avatar
Jason Plum committed
417 418
      --set global.operator.enabled=true \
      --set global.operator.bootstrap=${OPERATOR_BOOTSTRAP-false} \
419 420 421 422 423 424
      --namespace="$KUBE_NAMESPACE" \
      --version="$CI_PIPELINE_ID-$CI_JOB_ID" \
      "$name" \
      .
  }

425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442
  function add_license() {
    if [ -z "${REVIEW_APPS_EE_LICENSE}" ]; then echo "License not found" && return; fi

    while [ -z "$(kubectl get pods -n ${KUBE_NAMESPACE} --field-selector=status.phase=Running -lapp=task-runner,release=${CI_ENVIRONMENT_SLUG} --no-headers -o=custom-columns=NAME:.metadata.name)" ]; do
      echo "Waiting till task-runner pod is ready";
      sleep 5;
    done

    task_runner_pod=$(kubectl get pods -n ${KUBE_NAMESPACE} --field-selector=status.phase=Running -lapp=task-runner,release=${CI_ENVIRONMENT_SLUG} --no-headers -o=custom-columns=NAME:.metadata.name)

    if [ -z "${task_runner_pod}" ]; then echo "Task runner pod not found" && return; fi
    echo "Task runner pod is ${task_runner_pod}"

    echo "${REVIEW_APPS_EE_LICENSE}" > /tmp/license.gitlab
    kubectl -n "$KUBE_NAMESPACE" cp /tmp/license.gitlab ${task_runner_pod}:/tmp/license.gitlab
    rm /tmp/license.gitlab

    kubectl -n "$KUBE_NAMESPACE" exec -it ${task_runner_pod} -- /srv/gitlab/bin/rails runner -e production \
443 444 445
     '
     content = File.read("/tmp/license.gitlab").strip;
     FileUtils.rm_f("/tmp/license.gitlab");
446 447

     unless License.where(data:content).empty?
448
       puts "License already exists";
449 450 451 452
       Kernel.exit 0;
     end

     unless License.new(data: content).save
453
       puts "Could not add license";
454 455 456
       Kernel.exit 0;
     end

457 458
     puts "License added";
     '
459 460
  }

461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507
  function setup_test_db() {
    if [ -z ${KUBERNETES_PORT+x} ]; then
      DB_HOST=postgres
    else
      DB_HOST=localhost
    fi
    export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:5432/${POSTGRES_DB}"
  }

  function download_chart() {
    if [[ ! -d chart ]]; then
      auto_chart=${AUTO_DEVOPS_CHART:-gitlab/auto-deploy-app}
      auto_chart_name=$(basename $auto_chart)
      auto_chart_name=${auto_chart_name%.tgz}
    else
      auto_chart="chart"
      auto_chart_name="chart"
    fi

    helm init --client-only
    helm repo add gitlab https://charts.gitlab.io
    if [[ ! -d "$auto_chart" ]]; then
      helm fetch ${auto_chart} --untar
    fi
    if [ "$auto_chart_name" != "chart" ]; then
      mv ${auto_chart_name} chart
    fi

    helm dependency update chart/
    helm dependency build chart/
  }

  function ensure_namespace() {
    kubectl describe namespace "$KUBE_NAMESPACE" || kubectl create namespace "$KUBE_NAMESPACE"
  }

  function check_kube_domain() {
    if [ -z ${AUTO_DEVOPS_DOMAIN+x} ]; then
      echo "In order to deploy, AUTO_DEVOPS_DOMAIN must be set as a variable at the group or project level, or manually added in .gitlab-cy.yml"
      false
    else
      true
    fi
  }

  # Resolve gitlab$DOMAIN and export DOMAIN_IP; fails when DNS is not set up.
  function check_domain_ip() {
    # Expect the `DOMAIN` is a wildcard.
    domain_ip=$(nslookup gitlab$DOMAIN 2>/dev/null | grep "Address 1:" | cut -d' ' -f3)
    # Quoted to keep the -z test well-formed when the lookup returns nothing.
    if [ -z "$domain_ip" ]; then
      echo "There was a problem resolving the IP of 'gitlab$DOMAIN'. Be sure you have configured a DNS entry."
      false
    else
      export DOMAIN_IP=$domain_ip
      echo "Found IP for gitlab$DOMAIN: $DOMAIN_IP"
      true
    fi
  }

  # Install/upgrade Tiller in $TILLER_NAMESPACE and wait until it is ready.
  function install_tiller() {
    echo "Checking Tiller..."
    helm init --upgrade --service-account tiller
    kubectl rollout status -n "$TILLER_NAMESPACE" -w "deployment/tiller-deploy"
    if ! helm version --debug; then
      echo "Failed to init Tiller."
      return 1
    fi
    echo ""
  }

530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548
  function install_external_dns() {
    echo "Checking External DNS..."
    release_name="gitlab-external-dns"
    if ! helm status --tiller-namespace "${TILLER_NAMESPACE}" "${release_name}" > /dev/null 2>&1 ; then
      # We need to store the credentials in a secret
      kubectl create secret generic "${release_name}-secret" --from-literal="credentials.json=${GOOGLE_CLOUD_KEYFILE_JSON}"
      helm install stable/external-dns \
        -n "${release_name}" \
        --namespace "${TILLER_NAMESPACE}" \
        --set provider="google" \
        --set domain-filter[0]="helm-charts.win" \
        --set google.project="${GOOGLE_PROJECT_ID}" \
        --set google.serviceAccountSecret="${release_name}-secret" \
        --set txtOwnerId="${TILLER_NAMESPACE}" \
        --set rbac.create="true" \
        --set policy="sync"
    fi
  }

549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565
  function create_secret() {
    kubectl create secret -n "$KUBE_NAMESPACE" \
      docker-registry gitlab-registry-docker \
      --docker-server="$CI_REGISTRY" \
      --docker-username="$CI_REGISTRY_USER" \
      --docker-password="$CI_REGISTRY_PASSWORD" \
      --docker-email="$GITLAB_USER_EMAIL" \
      -o yaml --dry-run | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
  }

  function delete() {
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi
566
    helm delete --purge "$name" || true
567 568
  }

569
  function cleanup() {
570
    gitlabs=''
Jason Plum's avatar
Jason Plum committed
571
    if crdExists ; then
572
      gitlabs=',gitlabs'
Jason Plum's avatar
Jason Plum committed
573 574 575
    fi

    kubectl -n "$KUBE_NAMESPACE" get ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd${gitlabs} 2>&1 \
576
      | grep "$CI_ENVIRONMENT_SLUG" \
577 578
      | awk '{print $1}' \
      | xargs kubectl -n "$KUBE_NAMESPACE" delete \
579
      || true
580 581
  }

# Shared template for the rspec jobs; deployment URLs come from the
# `variables` artifact produced by the review/stable jobs.
.specs: &specs
  image: registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.5.3-git-2.18-chrome-71.0-node-8.x-yarn-1.12-graphicsmagick-1.3.29-docker-18.06.1
  stage: specs
  services:
  - docker:dind
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker:2375
    GITLAB_PASSWORD: $ROOT_PASSWORD
    RELEASE_NAME: $CI_ENVIRONMENT_SLUG
    S3_CONFIG_PATH: /etc/gitlab/minio
  script:
    - source variables
    - apt-get update && apt-get install -y --no-install-recommends curl ca-certificates
    - curl -LsO https://storage.googleapis.com/kubernetes-release/release/v1.9.3/bin/linux/amd64/kubectl
    - chmod +x kubectl
    - mv kubectl /usr/local/bin/kubectl
    - mkdir -p /etc/gitlab/minio
    - kubectl get secret ${CI_ENVIRONMENT_SLUG}-minio-secret -o jsonpath='{.data.accesskey}' | base64 --decode > /etc/gitlab/minio/accesskey
    - kubectl get secret ${CI_ENVIRONMENT_SLUG}-minio-secret -o jsonpath='{.data.secretkey}' | base64 --decode > /etc/gitlab/minio/secretkey
    - bundle install -j $(nproc) --without non_test --path gems
    - bundle exec rspec -c -f d spec
  after_script:
    - *auto_devops
    - add_license
  artifacts:
    when: on_failure
    expire_in: 7d
    paths:
    - tmp/capybara
  cache:
    key: "${CI_JOB_NAME}"
    paths:
    - gems

# Run the spec suite against the review app (branches only).
review_specs:
  <<: *specs
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  only:
    refs:
      - branches
    kubernetes: active
  except:
    refs:
      - master

# Run the spec suite against the production deployment (master only).
production_specs:
  <<: *specs
  environment:
    name: production
    url: https://gitlab.$AUTO_DEVOPS_DOMAIN
  only:
    refs:
      - master
    kubernetes: active

# Run gitlab-qa end-to-end tests against the deployed environment.
qa:
  image: registry.gitlab.com/gitlab-org/gitlab-omnibus-builder:ruby_docker-0.0.7
  stage: qa
  services:
  - docker:dind
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker:2375
    QA_ARTIFACTS_DIR: $CI_PROJECT_DIR
  script:
    - docker login -u gitlab-ci-token -p "$CI_JOB_TOKEN" "$CI_REGISTRY"
    - gem install gitlab-qa
    - source variables
    - app_version=$(ruby -e "require 'yaml'; puts YAML.safe_load(File.read('Chart.yaml'))['appVersion']")
    - qa_version="nightly"
    - if [ "$app_version" != "master"  ]; then
    -   qa_version="${app_version}-ee"
    - fi
    - GITLAB_USERNAME=root GITLAB_PASSWORD=$ROOT_PASSWORD GITLAB_ADMIN_USERNAME=root GITLAB_ADMIN_PASSWORD=$ROOT_PASSWORD EE_LICENSE=$REVIEW_APPS_EE_LICENSE gitlab-qa Test::Instance::Any EE:$qa_version https://$QA_ENVIRONMENT_URL
  artifacts:
    when: on_failure
    expire_in: 7d
    paths:
    - ./gitlab-qa-run-*
  only:
    refs:
      - branches
    kubernetes: active
  retry: 1
  allow_failure: true

# Source the Auto DevOps helper functions in every job.
before_script:
  - *auto_devops