Commit 47807774 authored by Dmitriy Zaporozhets's avatar Dmitriy Zaporozhets

Merge branch '47234-composable-auto-devops' into 'master'

Resolve "Composable Auto DevOps"

Closes #47234

See merge request gitlab-org/gitlab-ce!26520
parents c3c5190c 343e8343
---
title: Split Auto-DevOps.gitlab-ci.yml into reusable templates
merge_request: 26520
author:
type: changed
......@@ -699,6 +699,21 @@ renaming `.staging` to `staging`. Then make sure to uncomment the `when` key of
the `production` job to turn it into a manual action instead of deploying
automatically.
### Using components of Auto-DevOps

If you only require a subset of the features offered by Auto-DevOps, you can include
individual Auto-DevOps jobs into your own `.gitlab-ci.yml`.

For example, to make use of [Auto Build](#auto-build), you can add the following to
your `.gitlab-ci.yml`:

```yaml
include:
  - template: Jobs/Build.gitlab-ci.yml
```

Consult the [Auto DevOps template] for information on available jobs.
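You can combine several templates in the same way. The following is a minimal sketch
(the template names are taken from the `include:` list of the Auto DevOps template in
this merge request; keep only the jobs you actually need) that enables Auto Build
together with Auto Test:

```yaml
include:
  # Auto Build: builds the application image
  - template: Jobs/Build.gitlab-ci.yml
  # Auto Test: runs the Herokuish buildpack test
  - template: Jobs/Test.gitlab-ci.yml
```

The full list of job and security templates that Auto DevOps itself includes is visible
in the `include:` section of the updated Auto DevOps template in this merge request.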
### PostgreSQL database support
In order to support applications that require a database,
......
......@@ -72,879 +72,14 @@ stages:
- performance
- cleanup
build:
stage: build
image: "registry.gitlab.com/gitlab-org/cluster-integration/auto-build-image/master:stable"
services:
- docker:stable-dind
script:
- /build/build.sh
only:
- branches
- tags
test:
services:
- postgres:latest
variables:
POSTGRES_DB: test
stage: test
image: gliderlabs/herokuish:latest
script:
- setup_test_db
- cp -R . /tmp/app
- /bin/herokuish buildpack test
only:
- branches
- tags
except:
variables:
- $TEST_DISABLED
code_quality:
stage: test
image: docker:stable
allow_failure: true
services:
- docker:stable-dind
script:
- setup_docker
- code_quality
artifacts:
paths: [gl-code-quality-report.json]
only:
- branches
- tags
except:
variables:
- $CODE_QUALITY_DISABLED
license_management:
stage: test
image:
name: "registry.gitlab.com/gitlab-org/security-products/license-management:$CI_SERVER_VERSION_MAJOR-$CI_SERVER_VERSION_MINOR-stable"
entrypoint: [""]
allow_failure: true
script:
- license_management
artifacts:
paths: [gl-license-management-report.json]
only:
refs:
- branches
- tags
variables:
- $GITLAB_FEATURES =~ /\blicense_management\b/
except:
variables:
- $LICENSE_MANAGEMENT_DISABLED
performance:
stage: performance
image: docker:stable
allow_failure: true
services:
- docker:stable-dind
script:
- setup_docker
- performance
artifacts:
paths:
- performance.json
- sitespeed-results/
only:
refs:
- branches
- tags
kubernetes: active
except:
variables:
- $PERFORMANCE_DISABLED
sast:
stage: test
image: docker:stable
allow_failure: true
services:
- docker:stable-dind
script:
- setup_docker
- sast
artifacts:
reports:
sast: gl-sast-report.json
only:
refs:
- branches
- tags
variables:
- $GITLAB_FEATURES =~ /\bsast\b/
except:
variables:
- $SAST_DISABLED
dependency_scanning:
stage: test
image: docker:stable
allow_failure: true
services:
- docker:stable-dind
script:
- setup_docker
- dependency_scanning
artifacts:
reports:
dependency_scanning: gl-dependency-scanning-report.json
only:
refs:
- branches
- tags
variables:
- $GITLAB_FEATURES =~ /\bdependency_scanning\b/
except:
variables:
- $DEPENDENCY_SCANNING_DISABLED
container_scanning:
stage: test
image: docker:stable
allow_failure: true
services:
- docker:stable-dind
script:
- setup_docker
- container_scanning
artifacts:
paths: [gl-container-scanning-report.json]
only:
refs:
- branches
- tags
variables:
- $GITLAB_FEATURES =~ /\bcontainer_scanning\b/
except:
variables:
- $CONTAINER_SCANNING_DISABLED
dast:
stage: dast
allow_failure: true
image: registry.gitlab.com/gitlab-org/security-products/zaproxy
variables:
POSTGRES_DB: "false"
script:
- dast
artifacts:
paths: [gl-dast-report.json]
only:
refs:
- branches
- tags
kubernetes: active
variables:
- $GITLAB_FEATURES =~ /\bdast\b/
except:
refs:
- master
variables:
- $DAST_DISABLED
review:
stage: review
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy
- persist_environment_url
environment:
name: review/$CI_COMMIT_REF_NAME
url: http://$CI_PROJECT_ID-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN
on_stop: stop_review
artifacts:
paths: [environment_url.txt]
only:
refs:
- branches
- tags
kubernetes: active
except:
refs:
- master
variables:
- $REVIEW_DISABLED
stop_review:
stage: cleanup
variables:
GIT_STRATEGY: none
script:
- install_dependencies
- initialize_tiller
- delete
environment:
name: review/$CI_COMMIT_REF_NAME
action: stop
when: manual
allow_failure: true
only:
refs:
- branches
- tags
kubernetes: active
except:
refs:
- master
variables:
- $REVIEW_DISABLED
# Staging deploys are disabled by default since
# continuous deployment to production is enabled by default.
# If you prefer to automatically deploy to staging and
# only manually promote to production, enable this job by setting
# STAGING_ENABLED.
staging:
stage: staging
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy
environment:
name: staging
url: http://$CI_PROJECT_PATH_SLUG-staging.$KUBE_INGRESS_BASE_DOMAIN
only:
refs:
- master
kubernetes: active
variables:
- $STAGING_ENABLED
# Canaries are also disabled by default, but if you want them,
# and know what the downsides are, you can enable this by setting
# CANARY_ENABLED.
canary:
stage: canary
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy canary
environment:
name: production
url: http://$CI_PROJECT_PATH_SLUG.$KUBE_INGRESS_BASE_DOMAIN
when: manual
only:
refs:
- master
kubernetes: active
variables:
- $CANARY_ENABLED
.production: &production_template
stage: production
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy
- delete canary
- delete rollout
- persist_environment_url
environment:
name: production
url: http://$CI_PROJECT_PATH_SLUG.$KUBE_INGRESS_BASE_DOMAIN
artifacts:
paths: [environment_url.txt]
production:
<<: *production_template
only:
refs:
- master
kubernetes: active
except:
variables:
- $STAGING_ENABLED
- $CANARY_ENABLED
- $INCREMENTAL_ROLLOUT_ENABLED
- $INCREMENTAL_ROLLOUT_MODE
production_manual:
<<: *production_template
when: manual
allow_failure: false
only:
refs:
- master
kubernetes: active
variables:
- $STAGING_ENABLED
- $CANARY_ENABLED
except:
variables:
- $INCREMENTAL_ROLLOUT_ENABLED
- $INCREMENTAL_ROLLOUT_MODE
# This job implements incremental rollout on every push to `master`.
.rollout: &rollout_template
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy rollout $ROLLOUT_PERCENTAGE
- scale stable $((100-ROLLOUT_PERCENTAGE))
- delete canary
- persist_environment_url
environment:
name: production
url: http://$CI_PROJECT_PATH_SLUG.$KUBE_INGRESS_BASE_DOMAIN
artifacts:
paths: [environment_url.txt]
.manual_rollout_template: &manual_rollout_template
<<: *rollout_template
stage: production
when: manual
# These selectors provide a backward-compatible mode with $INCREMENTAL_ROLLOUT_ENABLED (before 11.4)
only:
refs:
- master
kubernetes: active
variables:
- $INCREMENTAL_ROLLOUT_MODE == "manual"
- $INCREMENTAL_ROLLOUT_ENABLED
except:
variables:
- $INCREMENTAL_ROLLOUT_MODE == "timed"
.timed_rollout_template: &timed_rollout_template
<<: *rollout_template
when: delayed
start_in: 5 minutes
only:
refs:
- master
kubernetes: active
variables:
- $INCREMENTAL_ROLLOUT_MODE == "timed"
timed rollout 10%:
<<: *timed_rollout_template
stage: incremental rollout 10%
variables:
ROLLOUT_PERCENTAGE: 10
timed rollout 25%:
<<: *timed_rollout_template
stage: incremental rollout 25%
variables:
ROLLOUT_PERCENTAGE: 25
timed rollout 50%:
<<: *timed_rollout_template
stage: incremental rollout 50%
variables:
ROLLOUT_PERCENTAGE: 50
timed rollout 100%:
<<: *timed_rollout_template
<<: *production_template
stage: incremental rollout 100%
variables:
ROLLOUT_PERCENTAGE: 100
rollout 10%:
<<: *manual_rollout_template
variables:
ROLLOUT_PERCENTAGE: 10
rollout 25%:
<<: *manual_rollout_template
variables:
ROLLOUT_PERCENTAGE: 25
rollout 50%:
<<: *manual_rollout_template
variables:
ROLLOUT_PERCENTAGE: 50
rollout 100%:
<<: *manual_rollout_template
<<: *production_template
allow_failure: false
# ---------------------------------------------------------------------------
.auto_devops: &auto_devops |
# Auto DevOps variables and functions
[[ "$TRACE" ]] && set -x
auto_database_url=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${CI_ENVIRONMENT_SLUG}-postgres:5432/${POSTGRES_DB}
export DATABASE_URL=${DATABASE_URL-$auto_database_url}
if [[ -z "$CI_COMMIT_TAG" ]]; then
export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
export CI_APPLICATION_TAG=$CI_COMMIT_SHA
else
export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE
export CI_APPLICATION_TAG=$CI_COMMIT_TAG
fi
export TILLER_NAMESPACE=$KUBE_NAMESPACE
# Extract "MAJOR.MINOR" from CI_SERVER_VERSION and generate "MAJOR-MINOR-stable" for Security Products
export SP_VERSION=$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')
function registry_login() {
if [[ -n "$CI_REGISTRY_USER" ]]; then
echo "Logging to GitLab Container Registry with CI credentials..."
docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY"
echo ""
fi
}
function container_scanning() {
registry_login
docker run -d --name db arminc/clair-db:latest
docker run -p 6060:6060 --link db:postgres -d --name clair --restart on-failure arminc/clair-local-scan:v2.0.6
apk add -U wget ca-certificates
docker pull ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG}
wget https://github.com/arminc/clair-scanner/releases/download/v8/clair-scanner_linux_amd64
mv clair-scanner_linux_amd64 clair-scanner
chmod +x clair-scanner
touch clair-whitelist.yml
retries=0
echo "Waiting for clair daemon to start"
while( ! wget -T 10 -q -O /dev/null http://${DOCKER_SERVICE}:6060/v1/namespaces ) ; do sleep 1 ; echo -n "." ; if [ $retries -eq 10 ] ; then echo " Timeout, aborting." ; exit 1 ; fi ; retries=$(($retries+1)) ; done
./clair-scanner -c http://${DOCKER_SERVICE}:6060 --ip $(hostname -i) -r gl-container-scanning-report.json -l clair.log -w clair-whitelist.yml ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG} || true
}
function code_quality() {
docker run --env SOURCE_CODE="$PWD" \
--volume "$PWD":/code \
--volume /var/run/docker.sock:/var/run/docker.sock \
"registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
}
function license_management() {
/run.sh analyze .
}
function sast() {
case "$CI_SERVER_VERSION" in
*-ee)
# Deprecation notice for CONFIDENCE_LEVEL variable
if [ -z "$SAST_CONFIDENCE_LEVEL" -a "$CONFIDENCE_LEVEL" ]; then
SAST_CONFIDENCE_LEVEL="$CONFIDENCE_LEVEL"
echo "WARNING: CONFIDENCE_LEVEL is deprecated and MUST be replaced with SAST_CONFIDENCE_LEVEL"
fi
docker run --env SAST_CONFIDENCE_LEVEL="${SAST_CONFIDENCE_LEVEL:-3}" \
--volume "$PWD:/code" \
--volume /var/run/docker.sock:/var/run/docker.sock \
"registry.gitlab.com/gitlab-org/security-products/sast:$SP_VERSION" /app/bin/run /code
;;
*)
echo "GitLab EE is required"
;;
esac
}
function dependency_scanning() {
case "$CI_SERVER_VERSION" in
*-ee)
docker run --env DEP_SCAN_DISABLE_REMOTE_CHECKS="${DEP_SCAN_DISABLE_REMOTE_CHECKS:-false}" \
--volume "$PWD:/code" \
--volume /var/run/docker.sock:/var/run/docker.sock \
"registry.gitlab.com/gitlab-org/security-products/dependency-scanning:$SP_VERSION" /code
;;
*)
echo "GitLab EE is required"
;;
esac
}
function get_replicas() {
track="${1:-stable}"
percentage="${2:-100}"
env_track=$( echo $track | tr -s '[:lower:]' '[:upper:]' )
env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]' )
if [[ "$track" == "stable" ]] || [[ "$track" == "rollout" ]]; then
# for stable track get number of replicas from `PRODUCTION_REPLICAS`
eval new_replicas=\$${env_slug}_REPLICAS
if [[ -z "$new_replicas" ]]; then
new_replicas=$REPLICAS
fi
else
# for all other tracks get number of replicas from `CANARY_PRODUCTION_REPLICAS`
eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
if [[ -z "$new_replicas" ]]; then
eval new_replicas=\${env_track}_REPLICAS
fi
fi
replicas="${new_replicas:-1}"
replicas="$(($replicas * $percentage / 100))"
# always return at least one replica
if [[ $replicas -gt 0 ]]; then
echo "$replicas"
else
echo 1
fi
}
# Extracts variables prefixed with K8S_SECRET_
# and creates a Kubernetes secret.
#
# e.g. If we have the following environment variables:
# K8S_SECRET_A=value1
# K8S_SECRET_B=multi\ word\ value
#
# Then we will create a secret with the following key-value pairs:
# data:
# A: dmFsdWUxCg==
# B: bXVsdGkgd29yZCB2YWx1ZQo=
function create_application_secret() {
track="${1-stable}"
export APPLICATION_SECRET_NAME=$(application_secret_name "$track")
env | sed -n "s/^K8S_SECRET_\(.*\)$/\1/p" > k8s_prefixed_variables
kubectl create secret \
-n "$KUBE_NAMESPACE" generic "$APPLICATION_SECRET_NAME" \
--from-env-file k8s_prefixed_variables -o yaml --dry-run |
kubectl replace -n "$KUBE_NAMESPACE" --force -f -
export APPLICATION_SECRET_CHECKSUM=$(cat k8s_prefixed_variables | sha256sum | cut -d ' ' -f 1)
rm k8s_prefixed_variables
}
function deploy_name() {
name="$CI_ENVIRONMENT_SLUG"
track="${1-stable}"
if [[ "$track" != "stable" ]]; then
name="$name-$track"
fi
echo $name
}
function application_secret_name() {
track="${1-stable}"
name=$(deploy_name "$track")
echo "${name}-secret"
}
function deploy() {
track="${1-stable}"
percentage="${2:-100}"
name=$(deploy_name "$track")
replicas="1"
service_enabled="true"
postgres_enabled="$POSTGRES_ENABLED"
# if the track is different from stable,
# re-use all attached resources
if [[ "$track" != "stable" ]]; then
service_enabled="false"
postgres_enabled="false"
fi
replicas=$(get_replicas "$track" "$percentage")
if [[ "$CI_PROJECT_VISIBILITY" != "public" ]]; then
secret_name='gitlab-registry'
else
secret_name=''
fi
create_application_secret "$track"
env_slug=$(echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]')
eval env_ADDITIONAL_HOSTS=\$${env_slug}_ADDITIONAL_HOSTS
if [ -n "$env_ADDITIONAL_HOSTS" ]; then
additional_hosts="{$env_ADDITIONAL_HOSTS}"
elif [ -n "$ADDITIONAL_HOSTS" ]; then
additional_hosts="{$ADDITIONAL_HOSTS}"
fi
if [[ -n "$DB_INITIALIZE" && -z "$(helm ls -q "^$name$")" ]]; then
echo "Deploying first release with database initialization..."
helm upgrade --install \
--wait \
--set service.enabled="$service_enabled" \
--set gitlab.app="$CI_PROJECT_PATH_SLUG" \
--set gitlab.env="$CI_ENVIRONMENT_SLUG" \
--set releaseOverride="$CI_ENVIRONMENT_SLUG" \
--set image.repository="$CI_APPLICATION_REPOSITORY" \
--set image.tag="$CI_APPLICATION_TAG" \
--set image.pullPolicy=IfNotPresent \
--set image.secrets[0].name="$secret_name" \
--set application.track="$track" \
--set application.database_url="$DATABASE_URL" \
--set application.secretName="$APPLICATION_SECRET_NAME" \
--set application.secretChecksum="$APPLICATION_SECRET_CHECKSUM" \
--set service.commonName="le.$KUBE_INGRESS_BASE_DOMAIN" \
--set service.url="$CI_ENVIRONMENT_URL" \
--set service.additionalHosts="$additional_hosts" \
--set replicaCount="$replicas" \
--set postgresql.enabled="$postgres_enabled" \
--set postgresql.nameOverride="postgres" \
--set postgresql.postgresUser="$POSTGRES_USER" \
--set postgresql.postgresPassword="$POSTGRES_PASSWORD" \
--set postgresql.postgresDatabase="$POSTGRES_DB" \
--set postgresql.imageTag="$POSTGRES_VERSION" \
--set application.initializeCommand="$DB_INITIALIZE" \
--namespace="$KUBE_NAMESPACE" \
"$name" \
chart/
echo "Deploying second release..."
helm upgrade --reuse-values \
--wait \
--set application.initializeCommand="" \
--set application.migrateCommand="$DB_MIGRATE" \
--namespace="$KUBE_NAMESPACE" \
"$name" \
chart/
else
echo "Deploying new release..."
helm upgrade --install \
--wait \
--set service.enabled="$service_enabled" \
--set gitlab.app="$CI_PROJECT_PATH_SLUG" \
--set gitlab.env="$CI_ENVIRONMENT_SLUG" \
--set releaseOverride="$CI_ENVIRONMENT_SLUG" \
--set image.repository="$CI_APPLICATION_REPOSITORY" \
--set image.tag="$CI_APPLICATION_TAG" \
--set image.pullPolicy=IfNotPresent \
--set image.secrets[0].name="$secret_name" \
--set application.track="$track" \
--set application.database_url="$DATABASE_URL" \
--set application.secretName="$APPLICATION_SECRET_NAME" \
--set application.secretChecksum="$APPLICATION_SECRET_CHECKSUM" \
--set service.commonName="le.$KUBE_INGRESS_BASE_DOMAIN" \
--set service.url="$CI_ENVIRONMENT_URL" \
--set service.additionalHosts="$additional_hosts" \
--set replicaCount="$replicas" \
--set postgresql.enabled="$postgres_enabled" \
--set postgresql.nameOverride="postgres" \
--set postgresql.postgresUser="$POSTGRES_USER" \
--set postgresql.postgresPassword="$POSTGRES_PASSWORD" \
--set postgresql.postgresDatabase="$POSTGRES_DB" \
--set application.migrateCommand="$DB_MIGRATE" \
--namespace="$KUBE_NAMESPACE" \
"$name" \
chart/
fi
kubectl rollout status -n "$KUBE_NAMESPACE" -w "$ROLLOUT_RESOURCE_TYPE/$name"
}
function scale() {
track="${1-stable}"
percentage="${2-100}"
name=$(deploy_name "$track")
replicas=$(get_replicas "$track" "$percentage")
if [[ -n "$(helm ls -q "^$name$")" ]]; then
helm upgrade --reuse-values \
--wait \
--set replicaCount="$replicas" \
--namespace="$KUBE_NAMESPACE" \
"$name" \
chart/
fi
}
function install_dependencies() {
apk add -U openssl curl tar gzip bash ca-certificates git
curl -sSL -o /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
curl -sSL -O https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.28-r0/glibc-2.28-r0.apk
apk add glibc-2.28-r0.apk
rm glibc-2.28-r0.apk
curl -sS "https://kubernetes-helm.storage.googleapis.com/helm-v${HELM_VERSION}-linux-amd64.tar.gz" | tar zx
mv linux-amd64/helm /usr/bin/
mv linux-amd64/tiller /usr/bin/
helm version --client
tiller -version
curl -sSL -o /usr/bin/kubectl "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
chmod +x /usr/bin/kubectl
kubectl version --client
}
# With the Kubernetes executor, 'localhost' must be used instead of the 'docker' hostname
# https://docs.gitlab.com/runner/executors/kubernetes.html
function setup_docker() {
if ! docker info &>/dev/null; then
if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
export DOCKER_HOST='tcp://localhost:2375'
export DOCKER_SERVICE="localhost"
else
export DOCKER_SERVICE="docker"
fi
fi
}
function setup_test_db() {
if [ -z ${KUBERNETES_PORT+x} ]; then
DB_HOST=postgres
else
DB_HOST=localhost
fi
export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:5432/${POSTGRES_DB}"
}
function download_chart() {
if [[ ! -d chart ]]; then
auto_chart=${AUTO_DEVOPS_CHART:-gitlab/auto-deploy-app}
auto_chart_name=$(basename $auto_chart)
auto_chart_name=${auto_chart_name%.tgz}
auto_chart_name=${auto_chart_name%.tar.gz}
else
auto_chart="chart"
auto_chart_name="chart"
fi
helm init --client-only
helm repo add gitlab ${AUTO_DEVOPS_CHART_REPOSITORY:-https://charts.gitlab.io}
if [[ ! -d "$auto_chart" ]]; then
helm fetch ${auto_chart} --untar
fi
if [ "$auto_chart_name" != "chart" ]; then
mv ${auto_chart_name} chart
fi
helm dependency update chart/
helm dependency build chart/
}
function ensure_namespace() {
kubectl describe namespace "$KUBE_NAMESPACE" || kubectl create namespace "$KUBE_NAMESPACE"
}
# Function to ensure backwards compatibility with AUTO_DEVOPS_DOMAIN
function ensure_kube_ingress_base_domain() {
if [ -z ${KUBE_INGRESS_BASE_DOMAIN+x} ] && [ -n "$AUTO_DEVOPS_DOMAIN" ] ; then
export KUBE_INGRESS_BASE_DOMAIN=$AUTO_DEVOPS_DOMAIN
fi
}
function check_kube_domain() {
ensure_kube_ingress_base_domain
if [[ -z "$KUBE_INGRESS_BASE_DOMAIN" ]]; then
echo "In order to deploy or use Review Apps,"
echo "AUTO_DEVOPS_DOMAIN or KUBE_INGRESS_BASE_DOMAIN variables must be set"
echo "From 11.8, you can set KUBE_INGRESS_BASE_DOMAIN in cluster settings"
echo "or by defining a variable at group or project level."
echo "You can also manually add it in .gitlab-ci.yml"
echo "AUTO_DEVOPS_DOMAIN support will be dropped on 12.0"
false
else
true
fi
}
function initialize_tiller() {
echo "Checking Tiller..."
export HELM_HOST="localhost:44134"
tiller -listen ${HELM_HOST} -alsologtostderr > /dev/null 2>&1 &
echo "Tiller is listening on ${HELM_HOST}"
if ! helm version --debug; then
echo "Failed to init Tiller."
return 1
fi
echo ""
}
function create_secret() {
echo "Create secret..."
if [[ "$CI_PROJECT_VISIBILITY" == "public" ]]; then
return
fi
kubectl create secret -n "$KUBE_NAMESPACE" \
docker-registry gitlab-registry \
--docker-server="$CI_REGISTRY" \
--docker-username="${CI_DEPLOY_USER:-$CI_REGISTRY_USER}" \
--docker-password="${CI_DEPLOY_PASSWORD:-$CI_REGISTRY_PASSWORD}" \
--docker-email="$GITLAB_USER_EMAIL" \
-o yaml --dry-run | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
}
function dast() {
export CI_ENVIRONMENT_URL=$(cat environment_url.txt)
mkdir /zap/wrk/
/zap/zap-baseline.py -J gl-dast-report.json -t "$CI_ENVIRONMENT_URL" || true
cp /zap/wrk/gl-dast-report.json .
}
function performance() {
export CI_ENVIRONMENT_URL=$(cat environment_url.txt)
mkdir gitlab-exporter
wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/10-5/index.js
mkdir sitespeed-results
if [ -f .gitlab-urls.txt ]
then
sed -i -e 's@^@'"$CI_ENVIRONMENT_URL"'@' .gitlab-urls.txt
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
else
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL"
fi
mv sitespeed-results/data/performance.json performance.json
}
function persist_environment_url() {
echo $CI_ENVIRONMENT_URL > environment_url.txt
}
function delete() {
track="${1-stable}"
name=$(deploy_name "$track")
if [[ -n "$(helm ls -q "^$name$")" ]]; then
helm delete --purge "$name"
fi
secret_name=$(application_secret_name "$track")
kubectl delete secret --ignore-not-found -n "$KUBE_NAMESPACE" "$secret_name"
}
before_script:
- *auto_devops
include:
- template: Jobs/Build.gitlab-ci.yml
- template: Jobs/Test.gitlab-ci.yml
- template: Jobs/Code-Quality.gitlab-ci.yml
- template: Jobs/Deploy.gitlab-ci.yml
- template: Jobs/Browser-Performance-Testing.gitlab-ci.yml
- template: Jobs/DAST.gitlab-ci.yml
- template: Security/Container-Scanning.gitlab-ci.yml
- template: Security/Dependency-Scanning.gitlab-ci.yml
- template: Security/License-Management.gitlab-ci.yml
- template: Security/SAST.gitlab-ci.yml
performance:
stage: performance
image: docker:stable
allow_failure: true
services:
- docker:stable-dind
script:
- |
if ! docker info &>/dev/null; then
if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
export DOCKER_HOST='tcp://localhost:2375'
fi
fi
- export CI_ENVIRONMENT_URL=$(cat environment_url.txt)
- mkdir gitlab-exporter
- wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/10-5/index.js
- mkdir sitespeed-results
- |
if [ -f .gitlab-urls.txt ]
then
sed -i -e 's@^@'"$CI_ENVIRONMENT_URL"'@' .gitlab-urls.txt
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
else
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL"
fi
- mv sitespeed-results/data/performance.json performance.json
artifacts:
paths:
- performance.json
- sitespeed-results/
only:
refs:
- branches
- tags
kubernetes: active
except:
variables:
- $PERFORMANCE_DISABLED
build:
stage: build
image: "registry.gitlab.com/gitlab-org/cluster-integration/auto-build-image/master:stable"
services:
- docker:stable-dind
script:
- |
if [[ -z "$CI_COMMIT_TAG" ]]; then
export CI_APPLICATION_REPOSITORY=${CI_APPLICATION_REPOSITORY:-$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG}
export CI_APPLICATION_TAG=${CI_APPLICATION_TAG:-$CI_COMMIT_SHA}
else
export CI_APPLICATION_REPOSITORY=${CI_APPLICATION_REPOSITORY:-$CI_REGISTRY_IMAGE}
export CI_APPLICATION_TAG=${CI_APPLICATION_TAG:-$CI_COMMIT_TAG}
fi
- /build/build.sh
only:
- branches
- tags
code_quality:
stage: test
image: docker:stable
allow_failure: true
services:
- docker:stable-dind
script:
- export CQ_VERSION=$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')
- |
if ! docker info &>/dev/null; then
if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
export DOCKER_HOST='tcp://localhost:2375'
fi
fi
- |
docker run --env SOURCE_CODE="$PWD" \
--volume "$PWD":/code \
--volume /var/run/docker.sock:/var/run/docker.sock \
"registry.gitlab.com/gitlab-org/security-products/codequality:$CQ_VERSION" /code
artifacts:
paths: [gl-code-quality-report.json]
only:
- branches
- tags
except:
variables:
- $CODE_QUALITY_DISABLED
dast:
stage: dast
image: docker:stable
variables:
DOCKER_DRIVER: overlay2
allow_failure: true
services:
- docker:stable-dind
script:
- export DAST_WEBSITE=${DAST_WEBSITE:-$(cat environment_url.txt)}
- export DAST_VERSION=${SP_VERSION:-$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')}
- |
if ! docker info &>/dev/null; then
if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
export DOCKER_HOST='tcp://localhost:2375'
fi
fi
- |
function dast_run() {
docker run \
--env DAST_TARGET_AVAILABILITY_TIMEOUT \
--volume "$PWD:/output" \
--volume /var/run/docker.sock:/var/run/docker.sock \
-w /output \
"registry.gitlab.com/gitlab-org/security-products/dast:$DAST_VERSION" \
/analyze -t $DAST_WEBSITE \
"$@"
}
- |
if [ -n "$DAST_AUTH_URL" ]
then
dast_run \
--auth-url $DAST_AUTH_URL \
--auth-username $DAST_USERNAME \
--auth-password $DAST_PASSWORD \
--auth-username-field $DAST_USERNAME_FIELD \
--auth-password-field $DAST_PASSWORD_FIELD
else
dast_run
fi
artifacts:
reports:
dast: gl-dast-report.json
only:
refs:
- branches
- tags
variables:
- $GITLAB_FEATURES =~ /\bdast\b/
except:
refs:
- master
variables:
- $DAST_DISABLED
review:
stage: review
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy
- persist_environment_url
environment:
name: review/$CI_COMMIT_REF_NAME
url: http://$CI_PROJECT_ID-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN
on_stop: stop_review
artifacts:
paths: [environment_url.txt]
only:
refs:
- branches
- tags
kubernetes: active
except:
refs:
- master
variables:
- $REVIEW_DISABLED
stop_review:
stage: cleanup
variables:
GIT_STRATEGY: none
script:
- install_dependencies
- initialize_tiller
- delete
environment:
name: review/$CI_COMMIT_REF_NAME
action: stop
when: manual
allow_failure: true
only:
refs:
- branches
- tags
kubernetes: active
except:
refs:
- master
variables:
- $REVIEW_DISABLED
# Staging deploys are disabled by default since
# continuous deployment to production is enabled by default.
# If you prefer to automatically deploy to staging and
# only manually promote to production, enable this job by setting
# STAGING_ENABLED.
staging:
stage: staging
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy
environment:
name: staging
url: http://$CI_PROJECT_PATH_SLUG-staging.$KUBE_INGRESS_BASE_DOMAIN
only:
refs:
- master
kubernetes: active
variables:
- $STAGING_ENABLED
# Canaries are disabled by default, but if you want them,
# and know what the downsides are, you can enable this by setting
# CANARY_ENABLED.
canary:
stage: canary
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy canary
environment:
name: production
url: http://$CI_PROJECT_PATH_SLUG.$KUBE_INGRESS_BASE_DOMAIN
when: manual
only:
refs:
- master
kubernetes: active
variables:
- $CANARY_ENABLED
.production: &production_template
stage: production
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy
- delete canary
- delete rollout
- persist_environment_url
environment:
name: production
url: http://$CI_PROJECT_PATH_SLUG.$KUBE_INGRESS_BASE_DOMAIN
artifacts:
paths: [environment_url.txt]
production:
<<: *production_template
only:
refs:
- master
kubernetes: active
except:
variables:
- $STAGING_ENABLED
- $CANARY_ENABLED
- $INCREMENTAL_ROLLOUT_ENABLED
- $INCREMENTAL_ROLLOUT_MODE
production_manual:
<<: *production_template
when: manual
allow_failure: false
only:
refs:
- master
kubernetes: active
variables:
- $STAGING_ENABLED
- $CANARY_ENABLED
except:
variables:
- $INCREMENTAL_ROLLOUT_ENABLED
- $INCREMENTAL_ROLLOUT_MODE
# This job implements incremental rollout on every push to `master`.
.rollout: &rollout_template
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- initialize_tiller
- create_secret
- deploy rollout $ROLLOUT_PERCENTAGE
- scale stable $((100-ROLLOUT_PERCENTAGE))
- delete canary
- persist_environment_url
environment:
name: production
url: http://$CI_PROJECT_PATH_SLUG.$KUBE_INGRESS_BASE_DOMAIN
artifacts:
paths: [environment_url.txt]
.manual_rollout_template: &manual_rollout_template
<<: *rollout_template
stage: production
when: manual
# These selectors provide a backward-compatible mode with $INCREMENTAL_ROLLOUT_ENABLED (before 11.4)
only:
refs:
- master
kubernetes: active
variables:
- $INCREMENTAL_ROLLOUT_MODE == "manual"
- $INCREMENTAL_ROLLOUT_ENABLED
except:
variables:
- $INCREMENTAL_ROLLOUT_MODE == "timed"
.timed_rollout_template: &timed_rollout_template
<<: *rollout_template
when: delayed
start_in: 5 minutes
only:
refs:
- master
kubernetes: active
variables:
- $INCREMENTAL_ROLLOUT_MODE == "timed"
timed rollout 10%:
<<: *timed_rollout_template
stage: incremental rollout 10%
variables:
ROLLOUT_PERCENTAGE: 10
timed rollout 25%:
<<: *timed_rollout_template
stage: incremental rollout 25%
variables:
ROLLOUT_PERCENTAGE: 25
timed rollout 50%:
<<: *timed_rollout_template
stage: incremental rollout 50%
variables:
ROLLOUT_PERCENTAGE: 50
timed rollout 100%:
<<: *timed_rollout_template
<<: *production_template
stage: incremental rollout 100%
variables:
ROLLOUT_PERCENTAGE: 100
rollout 10%:
<<: *manual_rollout_template
variables:
ROLLOUT_PERCENTAGE: 10
rollout 25%:
<<: *manual_rollout_template
variables:
ROLLOUT_PERCENTAGE: 25
rollout 50%:
<<: *manual_rollout_template
variables:
ROLLOUT_PERCENTAGE: 50
rollout 100%:
<<: *manual_rollout_template
<<: *production_template
allow_failure: false
.deploy_helpers: &deploy_helpers |
[[ "$TRACE" ]] && set -x
auto_database_url=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${CI_ENVIRONMENT_SLUG}-postgres:5432/${POSTGRES_DB}
export DATABASE_URL=${DATABASE_URL-$auto_database_url}
export TILLER_NAMESPACE=$KUBE_NAMESPACE
# Extract "MAJOR.MINOR" from CI_SERVER_VERSION and generate "MAJOR-MINOR-stable" for Security Products
function get_replicas() {
track="${1:-stable}"
percentage="${2:-100}"
env_track=$( echo $track | tr -s '[:lower:]' '[:upper:]' )
env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]' )
if [[ "$track" == "stable" ]] || [[ "$track" == "rollout" ]]; then
# for stable track get number of replicas from `PRODUCTION_REPLICAS`
eval new_replicas=\$${env_slug}_REPLICAS
if [[ -z "$new_replicas" ]]; then
new_replicas=$REPLICAS
fi
else
# for all other tracks get number of replicas from `CANARY_PRODUCTION_REPLICAS`
eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
if [[ -z "$new_replicas" ]]; then
eval new_replicas=\${env_track}_REPLICAS
fi
fi
replicas="${new_replicas:-1}"
replicas="$(($replicas * $percentage / 100))"
# always return at least one replica
if [[ $replicas -gt 0 ]]; then
echo "$replicas"
else
echo 1
fi
}
# Extracts variables prefixed with K8S_SECRET_
# and creates a Kubernetes secret.
#
# e.g. If we have the following environment variables:
# K8S_SECRET_A=value1
# K8S_SECRET_B=multi\ word\ value
#
# Then we will create a secret with the following key-value pairs:
# data:
# A: dmFsdWUxCg==
# B: bXVsdGkgd29yZCB2YWx1ZQo=
function create_application_secret() {
track="${1-stable}"
export APPLICATION_SECRET_NAME=$(application_secret_name "$track")
env | sed -n "s/^K8S_SECRET_\(.*\)$/\1/p" > k8s_prefixed_variables
kubectl create secret \
-n "$KUBE_NAMESPACE" generic "$APPLICATION_SECRET_NAME" \
--from-env-file k8s_prefixed_variables -o yaml --dry-run |
kubectl replace -n "$KUBE_NAMESPACE" --force -f -
export APPLICATION_SECRET_CHECKSUM=$(cat k8s_prefixed_variables | sha256sum | cut -d ' ' -f 1)
rm k8s_prefixed_variables
}
function deploy_name() {
name="$CI_ENVIRONMENT_SLUG"
track="${1-stable}"
if [[ "$track" != "stable" ]]; then
name="$name-$track"
fi
echo $name
}
function application_secret_name() {
track="${1-stable}"
name=$(deploy_name "$track")
echo "${name}-secret"
}
function deploy() {
track="${1-stable}"
percentage="${2:-100}"
name=$(deploy_name "$track")
if [[ -z "$CI_COMMIT_TAG" ]]; then
image_repository=${CI_APPLICATION_REPOSITORY:-$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG}
image_tag=${CI_APPLICATION_TAG:-$CI_COMMIT_SHA}
else
image_repository=${CI_APPLICATION_REPOSITORY:-$CI_REGISTRY_IMAGE}
image_tag=${CI_APPLICATION_TAG:-$CI_COMMIT_TAG}
fi
replicas="1"
service_enabled="true"
postgres_enabled="$POSTGRES_ENABLED"
# if the track is different from stable,
# re-use all attached resources
if [[ "$track" != "stable" ]]; then
service_enabled="false"
postgres_enabled="false"
fi
replicas=$(get_replicas "$track" "$percentage")
if [[ "$CI_PROJECT_VISIBILITY" != "public" ]]; then
secret_name='gitlab-registry'
else
secret_name=''
fi
create_application_secret "$track"
env_slug=$(echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]')
eval env_ADDITIONAL_HOSTS=\$${env_slug}_ADDITIONAL_HOSTS
if [ -n "$env_ADDITIONAL_HOSTS" ]; then
additional_hosts="{$env_ADDITIONAL_HOSTS}"
elif [ -n "$ADDITIONAL_HOSTS" ]; then
additional_hosts="{$ADDITIONAL_HOSTS}"
fi
if [[ -n "$DB_INITIALIZE" && -z "$(helm ls -q "^$name$")" ]]; then
echo "Deploying first release with database initialization..."
helm upgrade --install \
--wait \
--set service.enabled="$service_enabled" \
--set gitlab.app="$CI_PROJECT_PATH_SLUG" \
--set gitlab.env="$CI_ENVIRONMENT_SLUG" \
--set releaseOverride="$CI_ENVIRONMENT_SLUG" \
--set image.repository="$image_repository" \
--set image.tag="$image_tag" \
--set image.pullPolicy=IfNotPresent \
--set image.secrets[0].name="$secret_name" \
--set application.track="$track" \
--set application.database_url="$DATABASE_URL" \
--set application.secretName="$APPLICATION_SECRET_NAME" \
--set application.secretChecksum="$APPLICATION_SECRET_CHECKSUM" \
--set service.commonName="le.$KUBE_INGRESS_BASE_DOMAIN" \
--set service.url="$CI_ENVIRONMENT_URL" \
--set service.additionalHosts="$additional_hosts" \
--set replicaCount="$replicas" \
--set postgresql.enabled="$postgres_enabled" \
--set postgresql.nameOverride="postgres" \
--set postgresql.postgresUser="$POSTGRES_USER" \
--set postgresql.postgresPassword="$POSTGRES_PASSWORD" \
--set postgresql.postgresDatabase="$POSTGRES_DB" \
--set postgresql.imageTag="$POSTGRES_VERSION" \
--set application.initializeCommand="$DB_INITIALIZE" \
--namespace="$KUBE_NAMESPACE" \
"$name" \
chart/
echo "Deploying second release..."
helm upgrade --reuse-values \
--wait \
--set application.initializeCommand="" \
--set application.migrateCommand="$DB_MIGRATE" \
--namespace="$KUBE_NAMESPACE" \
"$name" \
chart/
else
echo "Deploying new release..."
helm upgrade --install \
--wait \
--set service.enabled="$service_enabled" \
--set gitlab.app="$CI_PROJECT_PATH_SLUG" \
--set gitlab.env="$CI_ENVIRONMENT_SLUG" \
--set releaseOverride="$CI_ENVIRONMENT_SLUG" \
--set image.repository="$image_repository" \
--set image.tag="$image_tag" \
--set image.pullPolicy=IfNotPresent \
--set image.secrets[0].name="$secret_name" \
--set application.track="$track" \
--set application.database_url="$DATABASE_URL" \
--set application.secretName="$APPLICATION_SECRET_NAME" \
--set application.secretChecksum="$APPLICATION_SECRET_CHECKSUM" \
--set service.commonName="le.$KUBE_INGRESS_BASE_DOMAIN" \
--set service.url="$CI_ENVIRONMENT_URL" \
--set service.additionalHosts="$additional_hosts" \
--set replicaCount="$replicas" \
--set postgresql.enabled="$postgres_enabled" \
--set postgresql.nameOverride="postgres" \
--set postgresql.postgresUser="$POSTGRES_USER" \
--set postgresql.postgresPassword="$POSTGRES_PASSWORD" \
--set postgresql.postgresDatabase="$POSTGRES_DB" \
--set application.migrateCommand="$DB_MIGRATE" \
--namespace="$KUBE_NAMESPACE" \
"$name" \
chart/
fi
kubectl rollout status -n "$KUBE_NAMESPACE" -w "$ROLLOUT_RESOURCE_TYPE/$name"
}
function scale() {
track="${1-stable}"
percentage="${2-100}"
name=$(deploy_name "$track")
replicas=$(get_replicas "$track" "$percentage")
if [[ -n "$(helm ls -q "^$name$")" ]]; then
helm upgrade --reuse-values \
--wait \
--set replicaCount="$replicas" \
--namespace="$KUBE_NAMESPACE" \
"$name" \
chart/
fi
}
function install_dependencies() {
apk add -U openssl curl tar gzip bash ca-certificates git
curl -sSL -o /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
curl -sSL -O https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.28-r0/glibc-2.28-r0.apk
apk add glibc-2.28-r0.apk
rm glibc-2.28-r0.apk
curl -sS "https://kubernetes-helm.storage.googleapis.com/helm-v${HELM_VERSION}-linux-amd64.tar.gz" | tar zx
mv linux-amd64/helm /usr/bin/
mv linux-amd64/tiller /usr/bin/
helm version --client
tiller -version
curl -sSL -o /usr/bin/kubectl "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
chmod +x /usr/bin/kubectl
kubectl version --client
}
function download_chart() {
if [[ ! -d chart ]]; then
auto_chart=${AUTO_DEVOPS_CHART:-gitlab/auto-deploy-app}
auto_chart_name=$(basename $auto_chart)
auto_chart_name=${auto_chart_name%.tgz}
auto_chart_name=${auto_chart_name%.tar.gz}
else
auto_chart="chart"
auto_chart_name="chart"
fi
helm init --client-only
helm repo add gitlab ${AUTO_DEVOPS_CHART_REPOSITORY:-https://charts.gitlab.io}
if [[ ! -d "$auto_chart" ]]; then
helm fetch ${auto_chart} --untar
fi
if [ "$auto_chart_name" != "chart" ]; then
mv ${auto_chart_name} chart
fi
helm dependency update chart/
helm dependency build chart/
}
function ensure_namespace() {
kubectl describe namespace "$KUBE_NAMESPACE" || kubectl create namespace "$KUBE_NAMESPACE"
}
# Function to ensure backwards compatibility with AUTO_DEVOPS_DOMAIN
function ensure_kube_ingress_base_domain() {
if [ -z ${KUBE_INGRESS_BASE_DOMAIN+x} ] && [ -n "$AUTO_DEVOPS_DOMAIN" ] ; then
export KUBE_INGRESS_BASE_DOMAIN=$AUTO_DEVOPS_DOMAIN
fi
}
function check_kube_domain() {
ensure_kube_ingress_base_domain
if [[ -z "$KUBE_INGRESS_BASE_DOMAIN" ]]; then
echo "In order to deploy or use Review Apps,"
echo "AUTO_DEVOPS_DOMAIN or KUBE_INGRESS_BASE_DOMAIN variables must be set"
echo "From 11.8, you can set KUBE_INGRESS_BASE_DOMAIN in cluster settings"
echo "or by defining a variable at group or project level."
echo "You can also manually add it in .gitlab-ci.yml"
echo "AUTO_DEVOPS_DOMAIN support will be dropped on 12.0"
false
else
true
fi
}
function initialize_tiller() {
echo "Checking Tiller..."
export HELM_HOST="localhost:44134"
tiller -listen ${HELM_HOST} -alsologtostderr > /dev/null 2>&1 &
echo "Tiller is listening on ${HELM_HOST}"
if ! helm version --debug; then
echo "Failed to init Tiller."
return 1
fi
echo ""
}
function create_secret() {
echo "Create secret..."
if [[ "$CI_PROJECT_VISIBILITY" == "public" ]]; then
return
fi
kubectl create secret -n "$KUBE_NAMESPACE" \
docker-registry gitlab-registry \
--docker-server="$CI_REGISTRY" \
--docker-username="${CI_DEPLOY_USER:-$CI_REGISTRY_USER}" \
--docker-password="${CI_DEPLOY_PASSWORD:-$CI_REGISTRY_PASSWORD}" \
--docker-email="$GITLAB_USER_EMAIL" \
-o yaml --dry-run | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
}
function persist_environment_url() {
echo $CI_ENVIRONMENT_URL > environment_url.txt
}
function delete() {
track="${1-stable}"
name=$(deploy_name "$track")
if [[ -n "$(helm ls -q "^$name$")" ]]; then
helm delete --purge "$name"
fi
secret_name=$(application_secret_name "$track")
kubectl delete secret --ignore-not-found -n "$KUBE_NAMESPACE" "$secret_name"
}
before_script:
- *deploy_helpers
test:
services:
- postgres:latest
variables:
POSTGRES_DB: test
stage: test
image: gliderlabs/herokuish:latest
script:
- |
if [ -z ${KUBERNETES_PORT+x} ]; then
DB_HOST=postgres
else
DB_HOST=localhost
fi
- export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:5432/${POSTGRES_DB}"
- cp -R . /tmp/app
- /bin/herokuish buildpack test
only:
- branches
- tags
except:
variables:
- $TEST_DISABLED
......@@ -28,6 +28,12 @@ container_scanning:
- docker:stable-dind
script:
- if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then { export DOCKER_SERVICE="localhost" ; export DOCKER_HOST="tcp://${DOCKER_SERVICE}:2375" ; } fi
- |
if [[ -n "$CI_REGISTRY_USER" ]]; then
echo "Logging to GitLab Container Registry with CI credentials..."
docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY"
echo ""
fi
- docker run -d --name db arminc/clair-db:latest
- docker run -p 6060:6060 --link db:postgres -d --name clair --restart on-failure arminc/clair-local-scan:${CLAIR_LOCAL_SCAN_VERSION}
- apk add -U wget ca-certificates
......@@ -36,7 +42,6 @@ container_scanning:
- mv clair-scanner_linux_amd64 clair-scanner
- chmod +x clair-scanner
- touch clair-whitelist.yml
- while( ! wget -q -O /dev/null http://${DOCKER_SERVICE}:6060/v1/namespaces ) ; do sleep 1 ; done
- retries=0
- echo "Waiting for clair daemon to start"
- while( ! wget -T 10 -q -O /dev/null http://${DOCKER_SERVICE}:6060/v1/namespaces ) ; do sleep 1 ; echo -n "." ; if [ $retries -eq 10 ] ; then echo " Timeout, aborting." ; exit 1 ; fi ; retries=$(($retries+1)) ; done
......
......@@ -4,6 +4,9 @@
# List of the variables: https://gitlab.com/gitlab-org/security-products/dast#settings
# How to set: https://docs.gitlab.com/ee/ci/yaml/#variables
include:
- template: Jobs/DAST.gitlab-ci.yml
variables:
DAST_WEBSITE: http://example.com # Please edit to be your website to scan for vulnerabilities
......@@ -14,46 +17,10 @@ stages:
- dast
dast:
stage: dast
image: docker:stable
variables:
DOCKER_DRIVER: overlay2
allow_failure: true
services:
- docker:stable-dind
script:
- export DAST_VERSION=${SP_VERSION:-$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')}
- |
function dast_run() {
docker run \
--env DAST_TARGET_AVAILABILITY_TIMEOUT \
--volume "$PWD:/output" \
--volume /var/run/docker.sock:/var/run/docker.sock \
-w /output \
"registry.gitlab.com/gitlab-org/security-products/dast:$DAST_VERSION" \
/analyze -t $DAST_WEBSITE \
"$@"
}
- |
if [ -n "$DAST_AUTH_URL" ]
then
dast_run \
--auth-url $DAST_AUTH_URL \
--auth-username $DAST_USERNAME \
--auth-password $DAST_PASSWORD \
--auth-username-field $DAST_USERNAME_FIELD \
--auth-password-field $DAST_PASSWORD_FIELD
else
dast_run
fi
artifacts:
reports:
dast: gl-dast-report.json
only:
refs:
- branches
variables:
- $GITLAB_FEATURES =~ /\bdast\b/
except:
refs: [] # Override default from template
variables:
- $DAST_DISABLED
......@@ -14,6 +14,12 @@ dependency_scanning:
- docker:stable-dind
script:
- export DS_VERSION=${SP_VERSION:-$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')}
- |
if ! docker info &>/dev/null; then
if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
export DOCKER_HOST='tcp://localhost:2375'
fi
fi
- |
docker run \
--env DS_ANALYZER_IMAGES \
......
......@@ -14,6 +14,12 @@ sast:
- docker:stable-dind
script:
- export SAST_VERSION=${SP_VERSION:-$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')}
- |
if ! docker info &>/dev/null; then
if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
export DOCKER_HOST='tcp://localhost:2375'
fi
fi
- |
docker run \
--env SAST_ANALYZER_IMAGES \
......
......@@ -4,6 +4,7 @@ require 'spec_helper'
describe "CI YML Templates" do
ABSTRACT_TEMPLATES = %w[Serverless].freeze
PROJECT_DEPENDENT_TEMPLATES = %w[Auto-DevOps].freeze
def self.concrete_templates
Gitlab::Template::GitlabCiYmlTemplate.all.reject do |template|
......@@ -20,7 +21,10 @@ describe "CI YML Templates" do
describe 'concrete templates with CI/CD jobs' do
concrete_templates.each do |template|
it "#{template.name} template should be valid" do
expect { Gitlab::Ci::YamlProcessor.new(template.content) }
# Trigger processing of included files
project = create(:project, :test_repo) if PROJECT_DEPENDENT_TEMPLATES.include?(template.name)
expect { Gitlab::Ci::YamlProcessor.new(template.content, project: project) }
.not_to raise_error
end
end
......