.gitlab-ci.yml 19.2 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
# This file is a template, and might need editing before it works on your project.
# Auto DevOps
# This CI/CD configuration provides a standard pipeline for
# * building a Docker image (using a buildpack if necessary),
# * storing the image in the container registry,
# * running tests from a buildpack,
# * running code quality analysis,
# * creating a review app for each topic branch,
# * and continuous deployment to production
#
# In order to deploy, you must have a Kubernetes cluster configured either
# via a project integration, or via group/project variables.
# AUTO_DEVOPS_DOMAIN must also be set as a variable at the group or project
# level, or manually added below.
#
# If you want to deploy to staging first, or enable canary deploys,
# uncomment the relevant jobs in the pipeline below.
#
# If Auto DevOps fails to detect the proper buildpack, or if you want to
# specify a custom buildpack, set a project variable `BUILDPACK_URL` to the
# repository URL of the buildpack.
# e.g. BUILDPACK_URL=https://github.com/heroku/heroku-buildpack-ruby.git#v142
# If you need multiple buildpacks, add a file to your project called
# `.buildpacks` that contains the URLs, one on each line, in order.
# Note: Auto CI does not work with multiple buildpacks yet

# Base image for all jobs unless overridden per-job below.
image: registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-charts-build-base

variables:
  # Keyfile path used by GCP tooling; written into the build workspace.
  GOOGLE_APPLICATION_CREDENTIALS: ${CI_PROJECT_DIR}/.google_keyfile.json
  # AUTO_DEVOPS_DOMAIN is the application deployment domain and should be set as a variable at the group or project level.
  # AUTO_DEVOPS_DOMAIN: domain.example.com

  # Postgres settings consumed by the .auto_devops shell helpers (DATABASE_URL).
  POSTGRES_USER: user
  POSTGRES_PASSWORD: testing-password
  POSTGRES_ENABLED: "false"
  POSTGRES_DB: $CI_ENVIRONMENT_SLUG

# Pipeline stage order: deploys first (review/staging/canary/stable),
# then test stages (specs, qa), then chart packaging and cleanup.
stages:
  - prepare
  - review
  - staging
  - canary
  - stable
  - specs
  - qa
  - package
  - cleanup
# Lints the chart and packages it into build/ (kept 3 days) on non-tag pipelines.
lint_package:
  stage: package
  when: always
  script:
    - helm init --client-only
    - helm repo add gitlab https://charts.gitlab.io
    - helm dependencies update
    - helm lint --set certmanager-issuer.email=support@gitlab.com
    - mkdir -p build
    - helm package -d build .
  artifacts:
    expire_in: 3d
    paths:
    - build
  except:
    - tags

# On tags of the canonical charts/gitlab project only: triggers a downstream
# release pipeline (project id 2860651 — presumably the charts publishing
# project; verify) passing the chart name and tag ref.
release_package:
  stage: package
  when: always
  script:
    - curl --request POST --form "token=$CI_JOB_TOKEN" --form ref=master
        --form "variables[CHART_NAME]=$CI_PROJECT_NAME"
        --form "variables[RELEASE_REF]=$CI_COMMIT_REF_NAME"
        https://gitlab.com/api/v4/projects/2860651/trigger/pipeline
  only:
    - tags@charts/gitlab
# Deploys a review app per branch (non-master, Kubernetes configured) and
# exports its URLs to the `variables` artifact consumed by the specs/qa jobs.
review:
  stage: review
  script:
    - check_kube_domain
    - ensure_namespace
    - install_tiller
    - create_secret
    - install_external_dns
    - deploy
    - add_license
    # The `variables` file is sourced by later jobs (specs, qa).
    - echo "export QA_ENVIRONMENT_URL=gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN" >> variables
    - echo "export GITLAB_ROOT_DOMAIN=$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"        >> variables
    - echo "export GITLAB_URL=gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"         >> variables
    - echo "export REGISTRY_URL=registry-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"     >> variables
    - echo "export S3_ENDPOINT=https://minio-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN" >> variables
  artifacts:
    paths:
    - variables
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  variables:
    # HOST_SUFFIX/DOMAIN feed the helm --set flags in deploy().
    HOST_SUFFIX: "$CI_ENVIRONMENT_SLUG"
    DOMAIN: "-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"
  only:
    refs:
      - branches
    kubernetes: active
  except:
    - master
# Manual job that tears down a review app (wired via environment on_stop).
stop_review:
  stage: review
  variables:
    # The branch may be gone by the time this runs; skip the normal checkout
    # and switch to master manually below.
    GIT_CHECKOUT: "false"
  script:
    - git checkout master
    - delete
    - cleanup
  environment:
    name: review/$CI_COMMIT_REF_NAME
    action: stop
  when: manual
  allow_failure: true
  only:
    refs:
      - branches
    kubernetes: active
  except:
    - master

# Keys that start with a dot (.) will not be processed by GitLab CI.
# Staging and canary jobs are disabled by default, to enable them
# remove the dot (.) before the job name.
# https://docs.gitlab.com/ee/ci/yaml/README.html#hidden-keys

# Staging deploys are disabled by default since
# continuous deployment to production is enabled by default
# If you prefer to automatically deploy to staging and
# only manually promote to production, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.

# Hidden (disabled) staging deploy; remove the leading dot to enable.
.staging:
  stage: staging
  script:
    - check_kube_domain
    - check_domain_ip
#    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy
  environment:
    name: staging
    url: https://gitlab-staging.$AUTO_DEVOPS_DOMAIN
  variables:
    DOMAIN: -staging.$AUTO_DEVOPS_DOMAIN
  only:
    refs:
      - master
    kubernetes: active

# Canaries are disabled by default, but if you want them,
# and know what the downsides are, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.

# Hidden (disabled) canary deploy into the production environment;
# `deploy canary` gives the release a "-canary" suffix (see deploy()).
.canary:
  stage: canary
  script:
    - check_kube_domain
    - check_domain_ip
#    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy canary
  environment:
    name: production
    url: https://gitlab.$AUTO_DEVOPS_DOMAIN
  variables:
    DOMAIN: ".$AUTO_DEVOPS_DOMAIN"
  when: manual
  only:
    refs:
      - master
    kubernetes: active

# This job continuously deploys to production on every push to `master`.
# To make this a manual process, either because you're enabling `staging`
# or `canary` deploys, or you simply want more control over when you deploy
# to production, uncomment the `when: manual` line in the `production` job.

# Continuous deploy to production on every push to master; also removes any
# canary release and exports production URLs for the qa/specs jobs.
stable:
  stage: stable
  script:
    - check_kube_domain
    - check_domain_ip
    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy
    - delete canary
    # The `variables` file is sourced by later jobs (specs, qa).
    - echo "export QA_ENVIRONMENT_URL=gitlab.$AUTO_DEVOPS_DOMAIN" >> variables
    - echo "export GITLAB_ROOT_DOMAIN=$AUTO_DEVOPS_DOMAIN"        >> variables
    - echo "export S3_ENDPOINT=https://minio.$AUTO_DEVOPS_DOMAIN" >> variables
  artifacts:
    paths:
    - variables
  environment:
    name: production
    url: https://gitlab.$AUTO_DEVOPS_DOMAIN
  variables:
    DOMAIN: ".$AUTO_DEVOPS_DOMAIN"
#  when: manual
  only:
    refs:
      - master
    kubernetes: active
# Runs `helm test` against the review-app release on branch pipelines.
review_helm_test:
  stage: qa
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  script:
    - export TILLER_NAMESPACE=$KUBE_NAMESPACE
    - helm test --cleanup "$CI_ENVIRONMENT_SLUG"
  only:
    refs:
      - branches
  except:
    refs:
      - master
# Runs `helm test` against the production release (canonical project only).
production_helm_test:
  stage: qa
  environment:
    name: production
    url: https://gitlab.$AUTO_DEVOPS_DOMAIN
  script:
    - export TILLER_NAMESPACE=$KUBE_NAMESPACE
    - helm test --cleanup "$CI_ENVIRONMENT_SLUG"
  only:
    refs:
      - master@charts/gitlab
# Diagnostic job that runs only when an earlier stage failed: dumps pod
# descriptions and resource listings for the review namespace.
debug_review:
  stage: qa
  when: on_failure
  script:
    - kubectl -n "$KUBE_NAMESPACE" describe pod
    - kubectl -n "$KUBE_NAMESPACE" get pod,jobs,secret,ing,cm,sa,svc,role,rolebinding,pvc
  artifacts:
    paths:
    - variables
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  variables:
    HOST_SUFFIX: "$CI_ENVIRONMENT_SLUG"
    DOMAIN: "-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN"
  only:
    refs:
      - branches@charts/gitlab
    kubernetes: active
  except:
    - master
# Runs Danger for merge-request hygiene; only when the API token is present,
# and never on master or tags.
danger-review:
  image: registry.gitlab.com/gitlab-org/gitlab-build-images:danger
  stage: prepare
  cache: {}
  only:
    variables:
      - $DANGER_GITLAB_API_TOKEN
  except:
    refs:
      - master
      - tags
  script:
    - git version
    - danger --fail-on-errors=true
287 288 289 290 291 292 293 294 295 296 297 298
# ---------------------------------------------------------------------------

# Shell helper library injected into every job via `before_script` at the end
# of this file. Defines deploy/delete/cleanup and supporting functions.
.auto_devops: &auto_devops |
  # Auto DevOps variables and functions
  [[ "$TRACE" ]] && set -x
  auto_database_url=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${CI_ENVIRONMENT_SLUG}-postgres:5432/${POSTGRES_DB}
  # DATABASE_URL keeps any externally-provided value, else falls back to the in-cluster postgres.
  export DATABASE_URL=${DATABASE_URL-$auto_database_url}
  export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
  export CI_APPLICATION_TAG=$CI_COMMIT_SHA
  export CI_CONTAINER_NAME=ci_job_build_${CI_JOB_ID}
  export TILLER_NAMESPACE=$KUBE_NAMESPACE
  # Returns 0 when a previous helm release named $CI_ENVIRONMENT_SLUG exists
  # in a state that would break `helm upgrade` (FAILED / PENDING_UPGRADE /
  # PENDING_INSTALL); returns 1 otherwise.
  function previousDeployFailed() {
    set +e
    echo "Checking for previous deployment of $CI_ENVIRONMENT_SLUG"
    deployment_status=$(helm status $CI_ENVIRONMENT_SLUG >/dev/null 2>&1)
    status=$?
    # if `status` is `0`, deployment exists, has a status
    if [ $status -eq 0 ]; then
      echo "Previous deployment found, checking status"
      # Parse the STATUS line out of `helm status` output.
      deployment_status=$(helm status $CI_ENVIRONMENT_SLUG | grep ^STATUS | cut -d' ' -f2)
      echo "Previous deployment state: $deployment_status"
      if [[ "$deployment_status" == "FAILED" || "$deployment_status" == "PENDING_UPGRADE" || "$deployment_status" == "PENDING_INSTALL" ]]; then
        status=0;
      else
        status=1;
      fi
    else
      echo "Previous deployment NOT found."
    fi
    set -e
    return $status
  }

  # Installs or upgrades the chart for the given track ("stable" by default).
  # Non-stable tracks (e.g. "canary") get a "-$track" release-name suffix and
  # reuse the stable database. Cleans up a previously failed release first.
  function deploy() {
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi

    replicas="1"
    service_enabled="false"
    postgres_enabled="$POSTGRES_ENABLED"
    # canary uses stable db
    [[ "$track" == "canary" ]] && postgres_enabled="false"

    env_track=$( echo $track | tr -s  '[:lower:]'  '[:upper:]' )
    env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s  '[:lower:]'  '[:upper:]' )

    if [[ "$track" == "stable" ]]; then
      # for stable track get number of replicas from `PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_slug}_REPLICAS
      service_enabled="true"
    else
      # for all tracks get number of replicas from `CANARY_PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
    fi
    if [[ -n "$new_replicas" ]]; then
      replicas="$new_replicas"
    fi

    #ROOT_PASSWORD=$(cat /dev/urandom | LC_TYPE=C tr -dc "[:alpha:]" | head -c 16)
    #echo "Generated root login: $ROOT_PASSWORD"
    # NOTE(review): relies on ROOT_PASSWORD being set as a CI variable — confirm.
    kubectl create secret generic "${CI_ENVIRONMENT_SLUG}-gitlab-initial-root-password" --from-literal=password=$ROOT_PASSWORD -o yaml --dry-run | kubectl replace --force -f -

    # YAML_FILE="${AUTO_DEVOPS_DOMAIN//\./-}.yaml"
    # Cleanup and previous installs, as FAILED and PENDING_UPGRADE will cause errors with `upgrade`
    if [ "$CI_ENVIRONMENT_SLUG" != "production" ] && previousDeployFailed ; then
      echo "Deployment in bad state, cleaning up $CI_ENVIRONMENT_SLUG"
      delete
      cleanup
    fi
    helm repo add gitlab https://charts.gitlab.io/
    helm dep update .

    helm upgrade --install \
      --wait \
      --timeout 600 \
      --set releaseOverride="$CI_ENVIRONMENT_SLUG" \
      --set global.hosts.hostSuffix="$HOST_SUFFIX" \
      --set global.hosts.domain="$AUTO_DEVOPS_DOMAIN" \
      --set global.ingress.annotations."external-dns\.alpha\.kubernetes\.io/ttl"="10" \
      --set global.ingress.tls.secretName=helm-charts-win-tls \
      --set global.ingress.configureCertmanager=false \
      --set certmanager.install=false \
      --set gitlab.unicorn.resources.requests.cpu=400m \
      --set gitlab.unicorn.maxReplicas=3 \
      --set gitlab.sidekiq.resources.requests.cpu=200m \
      --set gitlab.sidekiq.maxReplicas=2 \
      --set gitlab.gitlab-shell.resources.requests.cpu=100m \
      --set gitlab.task-runner.enabled=true \
      --set gitlab.gitlab-shell.maxReplicas=3 \
      --set redis.resources.requests.cpu=100m \
      --set minio.resources.requests.cpu=100m \
      --namespace="$KUBE_NAMESPACE" \
      --version="$CI_PIPELINE_ID-$CI_JOB_ID" \
      "$name" \
      .
  }

  # Uploads the REVIEW_APPS_EE_LICENSE into the deployed GitLab instance via
  # the task-runner pod's Rails runner. No-ops (returns) when the license or
  # the pod cannot be found.
  function add_license() {
    if [ -z "${REVIEW_APPS_EE_LICENSE}" ]; then echo "License not found" && return; fi

    # Wait for a Running task-runner pod belonging to this release.
    while [ -z "$(kubectl get pods -n ${KUBE_NAMESPACE} --field-selector=status.phase=Running -lapp=task-runner,release=${CI_ENVIRONMENT_SLUG} --no-headers -o=custom-columns=NAME:.metadata.name)" ]; do
      echo "Waiting till task-runner pod is ready";
      sleep 5;
    done

    task_runner_pod=$(kubectl get pods -n ${KUBE_NAMESPACE} --field-selector=status.phase=Running -lapp=task-runner,release=${CI_ENVIRONMENT_SLUG} --no-headers -o=custom-columns=NAME:.metadata.name)

    if [ -z "${task_runner_pod}" ]; then echo "Task runner pod not found" && return; fi
    echo "Task runner pod is ${task_runner_pod}"

    # Copy the license into the pod, then import it with a Rails runner script.
    echo "${REVIEW_APPS_EE_LICENSE}" > /tmp/license.gitlab
    kubectl -n "$KUBE_NAMESPACE" cp /tmp/license.gitlab ${task_runner_pod}:/tmp/license.gitlab
    rm /tmp/license.gitlab

    kubectl -n "$KUBE_NAMESPACE" exec -it ${task_runner_pod} -- /srv/gitlab/bin/rails runner -e production \
     '
     content = File.read("/tmp/license.gitlab").strip;
     FileUtils.rm_f("/tmp/license.gitlab");

     unless License.where(data:content).empty?
       puts "License already exists";
       Kernel.exit 0;
     end

     unless License.new(data: content).save
       puts "Could not add license";
       Kernel.exit 0;
     end

     puts "License added";
     '
  }

  # Points DATABASE_URL at the right postgres host: the `postgres` service
  # alias outside Kubernetes, localhost when running inside a cluster.
  function setup_test_db() {
    if [ -z ${KUBERNETES_PORT+x} ]; then
      DB_HOST=postgres
    else
      DB_HOST=localhost
    fi
    export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:5432/${POSTGRES_DB}"
  }

  # Ensures a deployable chart exists in ./chart: uses a local chart/ dir if
  # present, otherwise fetches AUTO_DEVOPS_CHART (default gitlab/auto-deploy-app)
  # and renames it to chart/, then resolves its dependencies.
  function download_chart() {
    if [[ ! -d chart ]]; then
      auto_chart=${AUTO_DEVOPS_CHART:-gitlab/auto-deploy-app}
      auto_chart_name=$(basename $auto_chart)
      auto_chart_name=${auto_chart_name%.tgz}
    else
      auto_chart="chart"
      auto_chart_name="chart"
    fi

    helm init --client-only
    helm repo add gitlab https://charts.gitlab.io
    if [[ ! -d "$auto_chart" ]]; then
      helm fetch ${auto_chart} --untar
    fi
    if [ "$auto_chart_name" != "chart" ]; then
      mv ${auto_chart_name} chart
    fi

    helm dependency update chart/
    helm dependency build chart/
  }

  # Creates the target namespace if it does not already exist.
  function ensure_namespace() {
    kubectl describe namespace "$KUBE_NAMESPACE" || kubectl create namespace "$KUBE_NAMESPACE"
  }

  # Fails fast when AUTO_DEVOPS_DOMAIN is unset, since every deploy derives
  # its hostnames from it. Returns non-zero (failing the job) when missing.
  function check_kube_domain() {
    if [ -z ${AUTO_DEVOPS_DOMAIN+x} ]; then
      # Fixed typo in the referenced filename (.gitlab-cy.yml -> .gitlab-ci.yml).
      echo "In order to deploy, AUTO_DEVOPS_DOMAIN must be set as a variable at the group or project level, or manually added in .gitlab-ci.yml"
      false
    else
      true
    fi
  }

  # Resolves gitlab$DOMAIN and exports DOMAIN_IP; fails the job when the name
  # does not resolve, so deploys stop before helm runs.
  function check_domain_ip() {
    # Expect the `DOMAIN` is a wildcard.
    # NOTE(review): "Address 1:" matches busybox-style nslookup output — confirm for this image.
    domain_ip=$(nslookup gitlab$DOMAIN 2>/dev/null | grep "Address 1:" | cut -d' ' -f3)
    # Quoted to avoid a [ usage error if the extraction yields multiple words.
    if [ -z "$domain_ip" ]; then
      echo "There was a problem resolving the IP of 'gitlab$DOMAIN'. Be sure you have configured a DNS entry."
      false
    else
      export DOMAIN_IP=$domain_ip
      echo "Found IP for gitlab$DOMAIN: $DOMAIN_IP"
      true
    fi
  }

  # Installs/upgrades Tiller in $TILLER_NAMESPACE, waits for its deployment to
  # roll out, and verifies client/server connectivity with `helm version`.
  function install_tiller() {
    echo "Checking Tiller..."
    helm init --upgrade --service-account tiller
    kubectl rollout status -n "$TILLER_NAMESPACE" -w "deployment/tiller-deploy"
    if ! helm version --debug; then
      echo "Failed to init Tiller."
      return 1
    fi
    echo ""
  }

  # Installs the stable/external-dns chart (Google provider) once per
  # namespace, storing the GCP credentials in a dedicated secret.
  function install_external_dns() {
    echo "Checking External DNS..."
    release_name="gitlab-external-dns"
    if ! helm status --tiller-namespace "${TILLER_NAMESPACE}" "${release_name}" > /dev/null 2>&1 ; then
      # We need to store the credentials in a secret
      kubectl create secret generic "${release_name}-secret" --from-literal="credentials.json=${GOOGLE_CLOUD_KEYFILE_JSON}"
      # NOTE(review): the chart documents this value as `domainFilters` — verify
      # `domain-filter[0]` matches the chart version pinned here.
      helm install stable/external-dns \
        -n "${release_name}" \
        --namespace "${TILLER_NAMESPACE}" \
        --set provider="google" \
        --set domain-filter[0]="helm-charts.win" \
        --set google.project="${GOOGLE_PROJECT_ID}" \
        --set google.serviceAccountSecret="${release_name}-secret" \
        --set txtOwnerId="${TILLER_NAMESPACE}" \
        --set rbac.create="true" \
        --set policy="sync"
    fi
  }

  # Creates (or replaces) the docker-registry pull secret for the CI registry.
  # The dry-run | replace --force idiom makes the operation idempotent.
  function create_secret() {
    kubectl create secret -n "$KUBE_NAMESPACE" \
      docker-registry gitlab-registry-docker \
      --docker-server="$CI_REGISTRY" \
      --docker-username="$CI_REGISTRY_USER" \
      --docker-password="$CI_REGISTRY_PASSWORD" \
      --docker-email="$GITLAB_USER_EMAIL" \
      -o yaml --dry-run | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
  }

  # Purges the helm release for the given track (default "stable"); non-stable
  # tracks use the "-$track" suffixed name. Never fails the job (`|| true`).
  function delete() {
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi
    helm delete --purge "$name" || true
  }

  # Best-effort removal of every namespaced resource whose name contains the
  # environment slug; errors are swallowed so teardown never fails the job.
  function cleanup() {
    kubectl -n "$KUBE_NAMESPACE" get ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa 2>&1 \
      | grep "$CI_ENVIRONMENT_SLUG" \
      | awk '{print $1}' \
      | xargs kubectl -n "$KUBE_NAMESPACE" delete \
      || true
  }

# Shared template for the RSpec/Capybara feature-spec jobs. Concrete jobs
# (review_specs, production_specs) merge this anchor and add environment/rules.
.specs: &specs
  image: gunesmes/docker-capybara-chrome
  stage: specs
  services:
  - docker:dind
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker:2375
    GITLAB_PASSWORD: $ROOT_PASSWORD
    RELEASE_NAME: $CI_ENVIRONMENT_SLUG
    S3_CONFIG_PATH: /etc/gitlab/minio
  script:
    # `variables` is the artifact exported by the review/stable deploy jobs.
    - source variables
    - apt-get update && apt-get install -y --no-install-recommends curl ca-certificates
    - curl -sSL https://get.docker.com/ | sh
    - curl -LsO https://storage.googleapis.com/kubernetes-release/release/v1.9.3/bin/linux/amd64/kubectl
    - chmod +x kubectl
    - mv kubectl /usr/local/bin/kubectl
    - mkdir -p /etc/gitlab/minio
    - kubectl get secret ${CI_ENVIRONMENT_SLUG}-minio-secret -o jsonpath='{.data.accesskey}' | base64 --decode > /etc/gitlab/minio/accesskey
    - kubectl get secret ${CI_ENVIRONMENT_SLUG}-minio-secret -o jsonpath='{.data.secretkey}' | base64 --decode > /etc/gitlab/minio/secretkey
    - bundle install -j $(nproc) --without non_test --path gems
    - bundle exec rspec -c -f d spec
  after_script:
    # Re-load the auto_devops helpers (after_script runs in a fresh shell).
    - *auto_devops
    - add_license
  cache:
    key: "${CI_JOB_NAME}"
    paths:
    - gems

# Feature specs against the review app (branch pipelines, canonical project).
review_specs:
  <<: *specs
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  only:
    refs:
      - branches@charts/gitlab
  except:
    refs:
      - master

# Feature specs against production (master of the canonical project only).
production_specs:
  <<: *specs
  environment:
    name: production
    url: https://gitlab.$AUTO_DEVOPS_DOMAIN
  only:
    refs:
      - master@charts/gitlab
# Runs gitlab-qa end-to-end tests against the deployed instance, using the
# URL exported in the `variables` artifact and the chart's appVersion to pick
# the matching QA image (nightly when appVersion is master).
qa:
  image: registry.gitlab.com/gitlab-org/gitlab-omnibus-builder:ruby_docker-0.0.7
  stage: qa
  services:
  - docker:dind
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker:2375
    QA_ARTIFACTS_DIR: $CI_PROJECT_DIR
  script:
    - docker login -u gitlab-ci-token -p "$CI_JOB_TOKEN" "$CI_REGISTRY"
    - gem install gitlab-qa
    - source variables
    - app_version=$(ruby -e "require 'yaml'; puts YAML.safe_load(File.read('Chart.yaml'))['appVersion']")
    - qa_version="nightly"
    - if [ "$app_version" != "master"  ]; then
    -   qa_version="${app_version}-ee"
    - fi
    - GITLAB_USERNAME=root GITLAB_PASSWORD=$ROOT_PASSWORD GITLAB_ADMIN_USERNAME=root GITLAB_ADMIN_PASSWORD=$ROOT_PASSWORD EE_LICENSE=$REVIEW_APPS_EE_LICENSE gitlab-qa Test::Instance::Any EE:$qa_version https://$QA_ENVIRONMENT_URL
  artifacts:
    when: on_failure
    expire_in: 7d
    paths:
    - ./gitlab-qa-run-*
  only:
    refs:
      - branches
  retry: 1
  allow_failure: true

# Loads the .auto_devops shell helpers into every job's script environment.
before_script:
  - *auto_devops