# This file is a template, and might need editing before it works on your project.
# Auto DevOps
# This CI/CD configuration provides a standard pipeline for
# * building a Docker image (using a buildpack if necessary),
# * storing the image in the container registry,
# * running tests from a buildpack,
# * running code quality analysis,
# * creating a review app for each topic branch,
# * and continuous deployment to production
#
# In order to deploy, you must have a Kubernetes cluster configured either
# via a project integration, or via group/project variables.
# AUTO_DEVOPS_DOMAIN must also be set as a variable at the group or project
# level, or manually added below.
#
# If you want to deploy to staging first, or enable canary deploys,
# uncomment the relevant jobs in the pipeline below.
#
# If Auto DevOps fails to detect the proper buildpack, or if you want to
# specify a custom buildpack, set a project variable `BUILDPACK_URL` to the
# repository URL of the buildpack.
# e.g. BUILDPACK_URL=https://github.com/heroku/heroku-buildpack-ruby.git#v142
# If you need multiple buildpacks, add a file to your project called
# `.buildpacks` that contains the URLs, one on each line, in order.
# Note: Auto CI does not work with multiple buildpacks yet
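#
# For illustration only (a sketch with placeholder values, not read by this
# pipeline), the settings described above could be added as project variables
# or directly in this file:
#
#   variables:
#     AUTO_DEVOPS_DOMAIN: example.com
#     BUILDPACK_URL: https://github.com/heroku/heroku-buildpack-ruby.git#v142
#
# and a `.buildpacks` file would simply list one buildpack URL per line.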

image: registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-charts-build-base

variables:
  GOOGLE_APPLICATION_CREDENTIALS: ${CI_PROJECT_DIR}/.google_keyfile.json
  # AUTO_DEVOPS_DOMAIN is the application deployment domain and should be set as a variable at the group or project level.
  # AUTO_DEVOPS_DOMAIN: domain.example.com

  POSTGRES_USER: user
  POSTGRES_PASSWORD: testing-password
  POSTGRES_ENABLED: "false"
  POSTGRES_DB: $CI_ENVIRONMENT_SLUG

stages:
  - prepare
  - review
  - staging
  - canary
  - stable
  - specs
  - qa
  - package
  - cleanup

lint_package:
  stage: package
  when: always
  script:
    - helm init --client-only
    - helm repo add gitlab https://charts.gitlab.io
    - helm dependencies update
    - helm lint --set certmanager-issuer.email=support@gitlab.com
    - mkdir -p build
    - helm package -d build .
  artifacts:
    expire_in: 3d
    paths:
    - build
  except:
    - tags

.review_template:
  stage: review
  variables:
    HOST_SUFFIX: "$CI_ENVIRONMENT_SLUG"
    DOMAIN: "-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN"
    VARIABLES_FILE: "variables/${CI_JOB_NAME}"
  script:
    - mkdir -p $(dirname "${VARIABLES_FILE}")
    - check_kube_domain
    - ensure_namespace
    - install_tiller
    - create_secret
    - install_external_dns "${DNS_PROVIDER}" "${KUBE_INGRESS_BASE_DOMAIN}"
    - if ! crdExists || previousDeployFailed ; then OPERATOR_BOOTSTRAP=true deploy ; fi
    - deploy
    - add_license
    - echo "export QA_ENVIRONMENT_URL=gitlab-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN" >> "${VARIABLES_FILE}"
    - echo "export GITLAB_ROOT_DOMAIN=$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN"        >> "${VARIABLES_FILE}"
    - echo "export GITLAB_URL=gitlab-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN"         >> "${VARIABLES_FILE}"
    - echo "export REGISTRY_URL=registry-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN"     >> "${VARIABLES_FILE}"
    - echo "export S3_ENDPOINT=https://minio-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN" >> "${VARIABLES_FILE}"
  artifacts:
    paths:
    - variables
  only:
    refs:
      - branches
    variables:
      - $KUBECONFIG
  except:
    - master

review_gke:
  variables:
    DNS_PROVIDER: "google"
  extends: .review_template
  environment:
    name: gke_review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN
    on_stop: stop_review_gke

review_eks:
  variables:
    DNS_PROVIDER: "aws"
  extends: .review_template
  environment:
    name: eks_review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN
    on_stop: stop_review_eks

.stop_review_template:
  stage: review
  variables:
    GIT_CHECKOUT: "false"
  script:
    - git checkout master
    - delete
    - cleanup
  when: manual
  allow_failure: true
  only:
    refs:
      - branches
    variables:
      - $KUBECONFIG
  except:
    - master

stop_review_gke:
  extends: .stop_review_template
  environment:
    name: gke_review/$CI_COMMIT_REF_NAME
    action: stop

stop_review_eks:
  extends: .stop_review_template
  environment:
    name: eks_review/$CI_COMMIT_REF_NAME
    action: stop

# Keys that start with a dot (.) will not be processed by GitLab CI.
# Staging and canary jobs are disabled by default, to enable them
# remove the dot (.) before the job name.
# https://docs.gitlab.com/ee/ci/yaml/README.html#hidden-keys

# Staging deploys are disabled by default because continuous deployment
# to production is enabled by default.
# If you prefer to automatically deploy to staging and
# only manually promote to production, enable this job by removing the dot (.)
# and uncomment the `when: manual` line in the `.stable` job below.
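#
# As a sketch (not applied here), the change amounts to:
#
#   staging:            # renamed from `.staging:`
#     stage: staging
#     ...                # body unchanged
#
# plus uncommenting the `#  when: manual` line inside `.stable`.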

.staging:
  stage: staging
  script:
    - check_kube_domain
    - check_domain_ip
#    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy
  environment:
    name: staging
    url: https://gitlab-staging.$KUBE_INGRESS_BASE_DOMAIN
  variables:
    DOMAIN: -staging.$KUBE_INGRESS_BASE_DOMAIN
  only:
    refs:
      - master
    variables:
      - $KUBECONFIG

# Canaries are disabled by default. If you want them, and understand the
# downsides, enable this job by removing the dot (.) and uncomment the
# `when: manual` line in the `.stable` job below.

.canary:
  stage: canary
  script:
    - check_kube_domain
    - check_domain_ip
#    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy canary
  environment:
    name: production
    url: https://gitlab.$KUBE_INGRESS_BASE_DOMAIN
  variables:
    DOMAIN: ".$KUBE_INGRESS_BASE_DOMAIN"
  when: manual
  only:
    refs:
      - master
    variables:
      - $KUBECONFIG

# This job continuously deploys to production on every push to `master`.
# To make this a manual process, either because you're enabling `staging`
# or `canary` deploys, or because you simply want more control over when you
# deploy to production, uncomment the `when: manual` line in the `.stable` job.

.stable:
  stage: stable
  script:
    - mkdir -p $(dirname "${VARIABLES_FILE}")
    - check_kube_domain
    - check_domain_ip
    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - if ! crdExists || previousDeployFailed ; then OPERATOR_BOOTSTRAP=true deploy ; fi
    - deploy
    - delete canary
    - echo "export QA_ENVIRONMENT_URL=gitlab.$KUBE_INGRESS_BASE_DOMAIN" >> "${VARIABLES_FILE}"
    - echo "export GITLAB_ROOT_DOMAIN=$KUBE_INGRESS_BASE_DOMAIN"        >> "${VARIABLES_FILE}"
    - echo "export S3_ENDPOINT=https://minio.$KUBE_INGRESS_BASE_DOMAIN" >> "${VARIABLES_FILE}"
  artifacts:
    paths:
    - variables
  variables:
    DOMAIN: ".$KUBE_INGRESS_BASE_DOMAIN"
#  when: manual
  only:
    refs:
      - master
    variables:
      - $KUBECONFIG

stable_gke:
  extends: .stable
  environment:
    name: gke_production
    url: https://gitlab.$KUBE_INGRESS_BASE_DOMAIN
  variables:
    VARIABLES_FILE: "variables/stable_gke"

stable_eks:
  extends: .stable
  environment:
    name: eks_production
    url: https://gitlab.$KUBE_INGRESS_BASE_DOMAIN
  variables:
    VARIABLES_FILE: "variables/stable_eks"

review_helm_test:
  stage: qa
  environment:
    name: gke_review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN
    on_stop: stop_review_gke
  script:
    - export TILLER_NAMESPACE=$KUBE_NAMESPACE
    - helm test --cleanup "$CI_ENVIRONMENT_SLUG"
  only:
    refs:
      - branches
    variables:
      - $KUBECONFIG
  except:
    refs:
      - master

production_helm_test:
  stage: qa
  environment:
    name: gke_production
    url: https://gitlab.$KUBE_INGRESS_BASE_DOMAIN
  script:
    - export TILLER_NAMESPACE=$KUBE_NAMESPACE
    - helm test --cleanup "$CI_ENVIRONMENT_SLUG"
  only:
    refs:
      - master@charts/gitlab
    variables:
      - $KUBECONFIG


debug_review:
  stage: qa
  when: on_failure
  script:
    - kubectl -n "$KUBE_NAMESPACE" describe pod
    - kubectl -n "$KUBE_NAMESPACE" get pod,jobs,secret,ing,cm,sa,svc,role,rolebinding,pvc
  artifacts:
    paths:
    - variables
  environment:
    name: gke_review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN
    on_stop: stop_review_gke
  variables:
    HOST_SUFFIX: "$CI_ENVIRONMENT_SLUG"
    DOMAIN: "-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN"
  only:
    refs:
      - branches
    variables:
      - $KUBECONFIG
  except:
    - master

danger-review:
  image: registry.gitlab.com/gitlab-org/gitlab-build-images:danger
  stage: prepare
  cache: {}
  only:
    variables:
      - $DANGER_GITLAB_API_TOKEN
  except:
    refs:
      - master
      - tags
  script:
    - git version
    - danger --fail-on-errors=true

check_docs_internal_links:
  image: "registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-docs-lint"
  stage: prepare
  cache: {}
  dependencies: []
  before_script: []
  script:
    - mv doc/ /tmp/gitlab-docs/content/charts
    - cd /tmp/gitlab-docs
    # Build HTML from Markdown
    - bundle exec nanoc
    # Check the internal links
    - bundle exec nanoc check internal_links
    # Check the internal anchor links
    - bundle exec nanoc check internal_anchors

# ---------------------------------------------------------------------------

.auto_devops: &auto_devops |
  # Auto DevOps variables and functions
  [[ "$TRACE" ]] && set -x
  auto_database_url=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${CI_ENVIRONMENT_SLUG}-postgres:5432/${POSTGRES_DB}
  export DATABASE_URL=${DATABASE_URL-$auto_database_url}
  export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
  export CI_APPLICATION_TAG=$CI_COMMIT_SHA
  export CI_CONTAINER_NAME=ci_job_build_${CI_JOB_ID}
  export TILLER_NAMESPACE=$KUBE_NAMESPACE

  function previousDeployFailed() {
    set +e
    echo "Checking for previous deployment of $CI_ENVIRONMENT_SLUG"
    helm status $CI_ENVIRONMENT_SLUG >/dev/null 2>&1
    status=$?
    # if `status` is `0`, a previous deployment exists
    if [ $status -eq 0 ]; then
      echo "Previous deployment found, checking status"
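      # `helm status` (Helm 2) prints a line such as `STATUS: DEPLOYED`;
      # the grep/cut below extracts that second field, e.g. DEPLOYED or FAILED.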
      deployment_status=$(helm status $CI_ENVIRONMENT_SLUG | grep ^STATUS | cut -d' ' -f2)
      echo "Previous deployment state: $deployment_status"
      if [[ "$deployment_status" == "FAILED" || "$deployment_status" == "PENDING_UPGRADE" || "$deployment_status" == "PENDING_INSTALL" ]]; then
        status=0;
      else
        status=1;
      fi
    else
      echo "Previous deployment NOT found."
    fi
    set -e
    return $status
  }

  function crdExists() {
    echo "Checking for existing GitLab Operator CRD"
    kubectl get crd/gitlabs.${CI_ENVIRONMENT_SLUG}.gitlab.com >/dev/null 2>&1
    status=$?
    if [ $status -eq 0 ]; then
      echo "GitLab Operator CRD exists."
    else
      echo "GitLab Operator CRD does NOT exist."
    fi
    return $status
  }

  function deploy() {
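    # Deploys this chart for the given track ("stable" by default, or "canary");
    # the Helm release name is the environment slug, suffixed with the track
    # for non-stable tracks.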
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi

    replicas="1"
    service_enabled="false"
    postgres_enabled="$POSTGRES_ENABLED"
    # canary uses stable db
    [[ "$track" == "canary" ]] && postgres_enabled="false"

    env_track=$( echo $track | tr -s  '[:lower:]'  '[:upper:]' )
    env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s  '[:lower:]'  '[:upper:]' )

    if [[ "$track" == "stable" ]]; then
      # for stable track get number of replicas from `PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_slug}_REPLICAS
      service_enabled="true"
    else
      # for all other tracks get the number of replicas from e.g. `CANARY_PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
    fi
    if [[ -n "$new_replicas" ]]; then
      replicas="$new_replicas"
    fi
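    # Example (a sketch): for CI_ENVIRONMENT_SLUG=review-abc the stable track
    # reads $REVIEW_ABC_REPLICAS and the canary track reads
    # $CANARY_REVIEW_ABC_REPLICAS; when unset, a single replica is deployed.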

    #ROOT_PASSWORD=$(cat /dev/urandom | LC_TYPE=C tr -dc "[:alpha:]" | head -c 16)
    #echo "Generated root login: $ROOT_PASSWORD"
    kubectl create secret generic "${CI_ENVIRONMENT_SLUG}-gitlab-initial-root-password" --from-literal=password=$ROOT_PASSWORD -o yaml --dry-run | kubectl replace --force -f -

    # YAML_FILE="${KUBE_INGRESS_BASE_DOMAIN//\./-}.yaml"
    # Clean up any previous installs, as FAILED and PENDING_UPGRADE will cause errors with `upgrade`
    if [ "$CI_ENVIRONMENT_SLUG" != "production" ] && previousDeployFailed ; then
      echo "Deployment in bad state, cleaning up $CI_ENVIRONMENT_SLUG"
      delete
      cleanup
    fi
    helm repo add gitlab https://charts.gitlab.io/
    helm dep update .

    # If OPERATOR_BOOTSTRAP is set, we _do not_ want to use --wait / --timeout
    WAIT="--wait --timeout 600"
    if [ -n "${OPERATOR_BOOTSTRAP}" ]; then
      WAIT=""
    fi

    helm upgrade --install \
      $WAIT \
      --set releaseOverride="$CI_ENVIRONMENT_SLUG" \
      --set global.hosts.hostSuffix="$HOST_SUFFIX" \
      --set global.hosts.domain="$KUBE_INGRESS_BASE_DOMAIN" \
      --set global.ingress.annotations."external-dns\.alpha\.kubernetes\.io/ttl"="10" \
      --set global.ingress.tls.secretName=helm-charts-win-tls \
      --set global.ingress.configureCertmanager=false \
      --set certmanager.install=false \
      --set gitlab.unicorn.maxReplicas=3 \
      --set gitlab.sidekiq.maxReplicas=2 \
      --set gitlab.task-runner.enabled=true \
      --set gitlab.gitlab-shell.maxReplicas=3 \
      --set redis.resources.requests.cpu=100m \
      --set minio.resources.requests.cpu=100m \
      --set global.operator.enabled=true \
      --set global.operator.bootstrap=${OPERATOR_BOOTSTRAP-false} \
      --set gitlab.operator.crdPrefix="$CI_ENVIRONMENT_SLUG" \
      --namespace="$KUBE_NAMESPACE" \
      --version="$CI_PIPELINE_ID-$CI_JOB_ID" \
      "$name" \
      .
  }

  function add_license() {
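    # Copies $REVIEW_APPS_EE_LICENSE into the running task-runner pod and loads
    # it through a Rails runner; returns quietly when no license is configured.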
    if [ -z "${REVIEW_APPS_EE_LICENSE}" ]; then echo "License not found" && return; fi

    while [ -z "$(kubectl get pods -n ${KUBE_NAMESPACE} --field-selector=status.phase=Running -lapp=task-runner,release=${CI_ENVIRONMENT_SLUG} --no-headers -o=custom-columns=NAME:.metadata.name)" ]; do
      echo "Waiting till task-runner pod is ready";
      sleep 5;
    done

    task_runner_pod=$(kubectl get pods -n ${KUBE_NAMESPACE} --field-selector=status.phase=Running -lapp=task-runner,release=${CI_ENVIRONMENT_SLUG} --no-headers -o=custom-columns=NAME:.metadata.name)

    if [ -z "${task_runner_pod}" ]; then echo "Task runner pod not found" && return; fi
    echo "Task runner pod is ${task_runner_pod}"

    echo "${REVIEW_APPS_EE_LICENSE}" > /tmp/license.gitlab
    kubectl -n "$KUBE_NAMESPACE" cp /tmp/license.gitlab ${task_runner_pod}:/tmp/license.gitlab
    rm /tmp/license.gitlab

    kubectl -n "$KUBE_NAMESPACE" exec -it ${task_runner_pod} -- /srv/gitlab/bin/rails runner -e production \
     '
     content = File.read("/tmp/license.gitlab").strip;
     FileUtils.rm_f("/tmp/license.gitlab");

     unless License.where(data:content).empty?
       puts "License already exists";
       Kernel.exit 0;
     end

     unless License.new(data: content).save
       puts "Could not add license";
       Kernel.exit 0;
     end

     puts "License added";
     '
  }

  function setup_test_db() {
    if [ -z ${KUBERNETES_PORT+x} ]; then
      DB_HOST=postgres
    else
      DB_HOST=localhost
    fi
    export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:5432/${POSTGRES_DB}"
  }

  function download_chart() {
    if [[ ! -d chart ]]; then
      auto_chart=${AUTO_DEVOPS_CHART:-gitlab/auto-deploy-app}
      auto_chart_name=$(basename $auto_chart)
      auto_chart_name=${auto_chart_name%.tgz}
    else
      auto_chart="chart"
      auto_chart_name="chart"
    fi

    helm init --client-only
    helm repo add gitlab https://charts.gitlab.io
    if [[ ! -d "$auto_chart" ]]; then
      helm fetch ${auto_chart} --untar
    fi
    if [ "$auto_chart_name" != "chart" ]; then
      mv ${auto_chart_name} chart
    fi

    helm dependency update chart/
    helm dependency build chart/
  }

  function ensure_namespace() {
    kubectl describe namespace "$KUBE_NAMESPACE" || kubectl create namespace "$KUBE_NAMESPACE"
  }

  function check_kube_domain() {
    if [ -z ${KUBE_INGRESS_BASE_DOMAIN+x} ]; then
      echo "In order to deploy, KUBE_INGRESS_BASE_DOMAIN must be set as a variable at the group or project level, or manually added in .gitlab-ci.yml"
      false
    else
      true
    fi
  }

  function check_domain_ip() {
    # Don't run on EKS clusters
    if [[ "$CI_ENVIRONMENT_SLUG" =~ ^eks.* ]]; then
      echo "Not running on EKS cluster"
      return 0
    fi

    # Expects `DOMAIN` to be a wildcard DNS entry.
    domain_ip=$(nslookup gitlab$DOMAIN 2>/dev/null | grep "Address 1:" | cut -d' ' -f3)
    if [ -z $domain_ip ]; then
      echo "There was a problem resolving the IP of 'gitlab$DOMAIN'. Be sure you have configured a DNS entry."
      false
    else
      export DOMAIN_IP=$domain_ip
      echo "Found IP for gitlab$DOMAIN: $DOMAIN_IP"
      true
    fi
  }

  function install_tiller() {
    echo "Checking Tiller..."
    helm init --upgrade --service-account tiller
    kubectl rollout status -n "$TILLER_NAMESPACE" -w "deployment/tiller-deploy"
    if ! helm version --debug; then
      echo "Failed to init Tiller."
      return 1
    fi
    echo ""
  }

  function install_external_dns() {
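    # Installs the stable/external-dns chart (once per Tiller namespace) so the
    # review-app hostnames, e.g. gitlab-<slug>.<base domain>, get DNS records.
    # $1: provider ("google" or "aws"), $2: domain filter (ingress base domain).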
    local provider="${1}"
    local domain_filter="${2}"
    local helm_args=''

    echo "Checking External DNS..."
    release_name="gitlab-external-dns"
    if ! helm status --tiller-namespace "${TILLER_NAMESPACE}" "${release_name}" > /dev/null 2>&1 ; then
      case "${provider}" in
        google)
          # We need to store the credentials in a secret
          kubectl create secret generic "${release_name}-secret" --from-literal="credentials.json=${GOOGLE_CLOUD_KEYFILE_JSON}"
          helm_args=" --set google.project='${GOOGLE_PROJECT_ID}' --set google.serviceAccountSecret='${release_name}-secret'"
          ;;
        aws)
          echo "Installing external-dns, ensure the NodeGroup has the permissions specified in"
          echo "https://github.com/helm/charts/tree/master/stable/external-dns#iam-permissions"
          ;;
      esac

      helm install stable/external-dns \
        -n "${release_name}" \
        --namespace "${TILLER_NAMESPACE}" \
        --set provider="${provider}" \
        --set domainFilters[0]="${domain_filter}" \
        --set txtOwnerId="${TILLER_NAMESPACE}" \
        --set rbac.create="true" \
        --set policy='sync' \
        ${helm_args}
    fi
  }

  function create_secret() {
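    # Recreates the registry image pull secret from the job's CI registry
    # credentials; `kubectl replace --force` keeps the step idempotent.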
    kubectl create secret -n "$KUBE_NAMESPACE" \
      docker-registry gitlab-registry-docker \
      --docker-server="$CI_REGISTRY" \
      --docker-username="$CI_REGISTRY_USER" \
      --docker-password="$CI_REGISTRY_PASSWORD" \
      --docker-email="$GITLAB_USER_EMAIL" \
      -o yaml --dry-run | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
  }

  function delete() {
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi
    helm delete --purge "$name" || true
  }

  function cleanup() {
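    # Deletes the objects (including cluster roles and CRDs) whose names contain
    # the environment slug; GitLab custom resources are included when the
    # Operator CRD is present.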
    gitlabs=''
    if crdExists ; then
      gitlabs=',gitlabs'
    fi

    kubectl -n "$KUBE_NAMESPACE" get ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd${gitlabs} 2>&1 \
      | grep "$CI_ENVIRONMENT_SLUG" \
      | awk '{print $1}' \
      | xargs kubectl -n "$KUBE_NAMESPACE" delete \
      || true
  }

.specs: &specs
  image: registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.5.3-git-2.18-chrome-71.0-node-8.x-yarn-1.12-graphicsmagick-1.3.29-docker-18.06.1
  stage: specs
  services:
  - docker:dind
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker:2375
    GITLAB_PASSWORD: $ROOT_PASSWORD
    RELEASE_NAME: $CI_ENVIRONMENT_SLUG
    S3_CONFIG_PATH: /etc/gitlab/minio
  script:
    - source "${VARIABLES_FILE}"
    - apt-get update && apt-get install -y --no-install-recommends curl ca-certificates
    - curl -LsO https://storage.googleapis.com/kubernetes-release/release/v1.9.3/bin/linux/amd64/kubectl
    - chmod +x kubectl
    - mv kubectl /usr/local/bin/kubectl
    - mkdir -p /etc/gitlab/minio
    - kubectl get secret ${CI_ENVIRONMENT_SLUG}-minio-secret -o jsonpath='{.data.accesskey}' | base64 --decode > /etc/gitlab/minio/accesskey
    - kubectl get secret ${CI_ENVIRONMENT_SLUG}-minio-secret -o jsonpath='{.data.secretkey}' | base64 --decode > /etc/gitlab/minio/secretkey
    - bundle install -j $(nproc) --without non_test --path gems
    - bundle exec rspec -c -f d spec
  after_script:
    - *auto_devops
    - add_license
  artifacts:
    when: on_failure
    expire_in: 7d
    paths:
    - tmp/capybara
  cache:
    key: "${CI_JOB_NAME}"
    paths:
    - gems

review_specs_gke:
  extends: .specs
  variables:
    VARIABLES_FILE: "variables/review_gke"
  environment:
    name: gke_review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN
    on_stop: stop_review_gke
  only:
    refs:
      - branches
    variables:
      - $KUBECONFIG
  except:
    refs:
      - master

review_specs_eks:
  extends: .specs
  variables:
    VARIABLES_FILE: "variables/review_eks"
  environment:
    name: eks_review/$CI_COMMIT_REF_NAME
    url: https://gitlab-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN
    on_stop: stop_review_eks
  only:
    refs:
      - branches
    variables:
      - $KUBECONFIG
  except:
    refs:
      - master

.production_specs:
  extends: .specs
  only:
    refs:
      - master
    variables:
      - $KUBECONFIG

production_specs_gke:
  extends: .production_specs
  variables:
    VARIABLES_FILE: "variables/stable_gke"
  environment:
    name: gke_production
    url: https://gitlab.$KUBE_INGRESS_BASE_DOMAIN

production_specs_eks:
  extends: .production_specs
  allow_failure: true
  variables:
    VARIABLES_FILE: "variables/stable_eks"
  environment:
    name: eks_production
    url: https://gitlab.$KUBE_INGRESS_BASE_DOMAIN

.qa:
  image: registry.gitlab.com/gitlab-org/gitlab-omnibus-builder:ruby_docker-0.0.7
  stage: qa
  services:
  - docker:dind
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker:2375
    QA_ARTIFACTS_DIR: $CI_PROJECT_DIR
  script:
    - docker login -u gitlab-ci-token -p "$CI_JOB_TOKEN" "$CI_REGISTRY"
    - gem install gitlab-qa
    - source "${VARIABLES_FILE}"
    - app_version=$(ruby -e "require 'yaml'; puts YAML.safe_load(File.read('Chart.yaml'))['appVersion']")
    - qa_version="nightly"
    - if [ "$app_version" != "master"  ]; then
    -   qa_version="${app_version}-ee"
    - fi
    - GITLAB_USERNAME=root GITLAB_PASSWORD=$ROOT_PASSWORD GITLAB_ADMIN_USERNAME=root GITLAB_ADMIN_PASSWORD=$ROOT_PASSWORD EE_LICENSE=$REVIEW_APPS_EE_LICENSE gitlab-qa Test::Instance::Any EE:$qa_version https://$QA_ENVIRONMENT_URL
  artifacts:
    when: on_failure
    expire_in: 7d
    paths:
    - ./gitlab-qa-run-*
  only:
    refs:
      - branches
    variables:
      - $KUBECONFIG
  retry: 1
  allow_failure: true

sync_images:
  image: registry.gitlab.com/gitlab-org/gitlab-omnibus-builder:ruby_docker-0.0.7
  stage: prepare
  services:
  - docker:dind
  before_script: []
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker:2375
  script:
    - bundle install
    - bundle exec rake images:sync[ee]
    - bundle exec rake images:sync[ce]
  only:
    - tags@gitlab/charts/gitlab

release_package:
  stage: package
  script:
    - curl --request POST --form "token=${COM_CHARTS_TRIGGER_TOKEN}" --form ref=master
        --form "variables[CHART_NAME]=$CI_PROJECT_NAME"
        --form "variables[RELEASE_REF]=$CI_COMMIT_REF_NAME"
        https://gitlab.com/api/v4/projects/2860651/trigger/pipeline
  only:
    - tags@gitlab/charts/gitlab
  dependencies:
    - sync_images

.qa_branch:
  extends: .qa
  except:
    refs:
      - master

.qa_production:
  extends: .qa
  only:
    refs:
      - master

qa_gke:
  extends: .qa_branch
  variables:
    VARIABLES_FILE: "variables/review_gke"
  environment:
    name: gke_review/$CI_COMMIT_REF_NAME
    on_stop: stop_review_gke

qa_eks:
  extends: .qa_branch
  variables:
    VARIABLES_FILE: "variables/review_eks"
  environment:
    name: eks_review/$CI_COMMIT_REF_NAME
    on_stop: stop_review_eks

qa_gke_production:
  extends: .qa_production
  variables:
    VARIABLES_FILE: "variables/stable_gke"
  environment:
    name: gke_production/$CI_COMMIT_REF_NAME

qa_eks_production:
  extends: .qa_production
  variables:
    VARIABLES_FILE: "variables/stable_eks"
  environment:
    name: eks_production/$CI_COMMIT_REF_NAME


before_script:
  - *auto_devops