Commit b71a496c authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent c2041156
Please view this file on the master branch, on stable branches it's out of date.

+## 12.9.3 (2020-04-14)
+
+### Security (1 change)
+
+- Fix filename bypass when uploading NuGet packages.
+
+
## 12.9.2 (2020-03-31)

### Fixed (4 changes)

@@ -152,6 +159,13 @@ Please view this file on the master branch, on stable branches it's out of date.
- Allow users to be marked as service users. !202680

+## 12.8.9 (2020-04-14)
+
+### Security (1 change)
+
+- Fix filename bypass when uploading NuGet packages.
+
+
## 12.8.7 (2020-03-16)

### Fixed (1 change)

@@ -305,6 +319,13 @@ Please view this file on the master branch, on stable branches it's out of date.
- Prepare DB structure for GMA forking changes. !22002

+## 12.7.9 (2020-04-14)
+
+### Security (1 change)
+
+- Fix filename bypass when uploading NuGet packages.
+
+
## 12.7.5

### Fixed (1 change)
...
@@ -163,7 +163,7 @@ gem 'diffy', '~> 3.3'
gem 'diff_match_patch', '~> 0.1.0'

# Application server
-gem 'rack', '~> 2.0.7'
+gem 'rack', '~> 2.0.9'

group :unicorn do
  gem 'unicorn', '~> 5.4.1'
...
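As a reminder of what the bumped constraint permits, Bundler's pessimistic operator `~>` pins the gem to the stated minor series. The new requirement above is equivalent to this Gemfile one-liner (shown purely for illustration):

```ruby
# '~> 2.0.9' locks Rack to the 2.0.x series at or above the patched
# release, so the security fix cannot be silently downgraded:
gem 'rack', '>= 2.0.9', '< 2.1'
```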
@@ -173,7 +173,7 @@ GEM
    concord (0.1.5)
      adamantium (~> 0.2.0)
      equalizer (~> 0.0.9)
-    concurrent-ruby (1.1.5)
+    concurrent-ruby (1.1.6)
    connection_pool (2.2.2)
    contracts (0.11.0)
    cork (0.3.0)
@@ -788,7 +788,7 @@ GEM
    public_suffix (4.0.3)
    pyu-ruby-sasl (0.0.3.3)
    raabro (1.1.6)
-    rack (2.0.7)
+    rack (2.0.9)
    rack-accept (0.4.5)
      rack (>= 0.4)
    rack-attack (6.2.0)
@@ -859,17 +859,17 @@ GEM
      json
    recursive-open-struct (1.1.0)
    redis (4.1.3)
-    redis-actionpack (5.1.0)
-      actionpack (>= 4.0, < 7)
-      redis-rack (>= 1, < 3)
+    redis-actionpack (5.2.0)
+      actionpack (>= 5, < 7)
+      redis-rack (>= 2.1.0, < 3)
      redis-store (>= 1.1.0, < 2)
    redis-activesupport (5.2.0)
      activesupport (>= 3, < 7)
      redis-store (>= 1.3, < 2)
    redis-namespace (1.6.0)
      redis (>= 3.0.4)
-    redis-rack (2.0.6)
-      rack (>= 1.5, < 3)
+    redis-rack (2.1.2)
+      rack (>= 2.0.8, < 3)
      redis-store (>= 1.2, < 2)
    redis-rails (5.0.2)
      redis-actionpack (>= 5.0, < 6)
@@ -1331,7 +1331,7 @@ DEPENDENCIES
  prometheus-client-mmap (~> 0.10.0)
  pry-byebug (~> 3.5.1)
  pry-rails (~> 0.3.9)
-  rack (~> 2.0.7)
+  rack (~> 2.0.9)
  rack-attack (~> 6.2.0)
  rack-cors (~> 1.0.6)
  rack-oauth2 (~> 1.9.3)
...
@@ -6,31 +6,32 @@ class ActiveSession
  SESSION_BATCH_SIZE = 200
  ALLOWED_NUMBER_OF_ACTIVE_SESSIONS = 100

-  attr_writer :session_id
-
  attr_accessor :created_at, :updated_at,
    :ip_address, :browser, :os,
    :device_name, :device_type,
-    :is_impersonated
+    :is_impersonated, :session_id

  def current?(session)
    return false if session_id.nil? || session.id.nil?

-    session_id == session.id
+    # Rack v2.0.8+ added private_id, which uses the hash of the
+    # public_id to avoid timing attacks.
+    session_id.private_id == session.id.private_id
  end

  def human_device_type
    device_type&.titleize
  end

+  # This is not the same as Rack::Session::SessionId#public_id, but we
+  # need to preserve this for backwards compatibility.
  def public_id
-    encrypted_id = Gitlab::CryptoHelper.aes256_gcm_encrypt(session_id)
-    CGI.escape(encrypted_id)
+    Gitlab::CryptoHelper.aes256_gcm_encrypt(session_id.public_id)
  end

  def self.set(user, request)
    Gitlab::Redis::SharedState.with do |redis|
-      session_id = request.session.id
+      session_id = request.session.id.public_id
      client = DeviceDetector.new(request.user_agent)
      timestamp = Time.current
@@ -63,32 +64,35 @@ class ActiveSession
  def self.list(user)
    Gitlab::Redis::SharedState.with do |redis|
-      cleaned_up_lookup_entries(redis, user).map do |entry|
-        # rubocop:disable Security/MarshalLoad
-        Marshal.load(entry)
-        # rubocop:enable Security/MarshalLoad
+      cleaned_up_lookup_entries(redis, user).map do |raw_session|
+        load_raw_session(raw_session)
      end
    end
  end

  def self.destroy(user, session_id)
+    return unless session_id
+
    Gitlab::Redis::SharedState.with do |redis|
      destroy_sessions(redis, user, [session_id])
    end
  end

  def self.destroy_with_public_id(user, public_id)
-    session_id = decrypt_public_id(public_id)
-    destroy(user, session_id) unless session_id.nil?
+    decrypted_id = decrypt_public_id(public_id)
+
+    return if decrypted_id.nil?
+
+    session_id = Rack::Session::SessionId.new(decrypted_id)
+    destroy(user, session_id)
  end

  def self.destroy_sessions(redis, user, session_ids)
-    key_names = session_ids.map {|session_id| key_name(user.id, session_id) }
-    session_names = session_ids.map {|session_id| "#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id}" }
+    key_names = session_ids.map { |session_id| key_name(user.id, session_id.public_id) }

-    redis.srem(lookup_key_name(user.id), session_ids)
+    redis.srem(lookup_key_name(user.id), session_ids.map(&:public_id))
    redis.del(key_names)
-    redis.del(session_names)
+    redis.del(rack_session_keys(session_ids))
  end

  def self.cleanup(user)
@@ -110,28 +114,65 @@ class ActiveSession
    sessions_from_ids(session_ids_for_user(user.id))
  end

+  # Lists the relevant session IDs for the user.
+  #
+  # Returns an array of Rack::Session::SessionId objects
  def self.session_ids_for_user(user_id)
    Gitlab::Redis::SharedState.with do |redis|
-      redis.smembers(lookup_key_name(user_id))
+      session_ids = redis.smembers(lookup_key_name(user_id))
+      session_ids.map { |id| Rack::Session::SessionId.new(id) }
    end
  end

+  # Lists the ActiveSession objects for the given session IDs.
+  #
+  # session_ids - An array of Rack::Session::SessionId objects
+  #
+  # Returns an array of ActiveSession objects
  def self.sessions_from_ids(session_ids)
    return [] if session_ids.empty?

    Gitlab::Redis::SharedState.with do |redis|
-      session_keys = session_ids.map { |session_id| "#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id}" }
+      session_keys = rack_session_keys(session_ids)

      session_keys.each_slice(SESSION_BATCH_SIZE).flat_map do |session_keys_batch|
        redis.mget(session_keys_batch).compact.map do |raw_session|
-          # rubocop:disable Security/MarshalLoad
-          Marshal.load(raw_session)
-          # rubocop:enable Security/MarshalLoad
+          load_raw_session(raw_session)
        end
      end
    end
  end

+  # Deserializes an ActiveSession object from Redis.
+  #
+  # raw_session - Raw bytes from Redis
+  #
+  # Returns an ActiveSession object
+  def self.load_raw_session(raw_session)
+    # rubocop:disable Security/MarshalLoad
+    session = Marshal.load(raw_session)
+    # rubocop:enable Security/MarshalLoad
+
+    # Older ActiveSession models serialize `session_id` as strings. To
+    # avoid breaking older sessions, we keep backwards compatibility
+    # with older Redis keys and initiate Rack::Session::SessionId here.
+    session.session_id = Rack::Session::SessionId.new(session.session_id) if session.try(:session_id).is_a?(String)
+    session
+  end
+
+  def self.rack_session_keys(session_ids)
+    session_ids.each_with_object([]) do |session_id, arr|
+      # This is a redis-rack implementation detail
+      # (https://github.com/redis-store/redis-rack/blob/master/lib/rack/session/redis.rb#L88)
+      #
+      # We need to delete session keys based on the legacy public key name
+      # and the newer private ID keys, but there's no well-defined interface
+      # so we have to do it directly.
+      arr << "#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id.public_id}"
+      arr << "#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id.private_id}"
    end
  end

  def self.raw_active_session_entries(redis, session_ids, user_id)
    return [] if session_ids.empty?
@@ -146,7 +187,7 @@ class ActiveSession
    entry_keys = raw_active_session_entries(redis, session_ids, user_id)

    entry_keys.compact.map do |raw_session|
-      Marshal.load(raw_session) # rubocop:disable Security/MarshalLoad
+      load_raw_session(raw_session)
    end
  end
@@ -159,10 +200,13 @@ class ActiveSession
    sessions = active_session_entries(session_ids, user.id, redis)
    sessions.sort_by! {|session| session.updated_at }.reverse!
    destroyable_sessions = sessions.drop(ALLOWED_NUMBER_OF_ACTIVE_SESSIONS)
-    destroyable_session_ids = destroyable_sessions.map { |session| session.send :session_id } # rubocop:disable GitlabSecurity/PublicSend
+    destroyable_session_ids = destroyable_sessions.map { |session| session.session_id }
    destroy_sessions(redis, user, destroyable_session_ids) if destroyable_session_ids.any?
  end

+  # Cleans up the lookup set by removing any session IDs that are no longer present.
+  #
+  # Returns an array of marshalled ActiveModel objects that are still active.
  def self.cleaned_up_lookup_entries(redis, user)
    session_ids = session_ids_for_user(user.id)
    entries = raw_active_session_entries(redis, session_ids, user.id)
@@ -181,13 +225,8 @@ class ActiveSession
  end

  private_class_method def self.decrypt_public_id(public_id)
-    decoded_id = CGI.unescape(public_id)
-    Gitlab::CryptoHelper.aes256_gcm_decrypt(decoded_id)
+    Gitlab::CryptoHelper.aes256_gcm_decrypt(public_id)
  rescue
    nil
  end
-
-  private
-
-  attr_reader :session_id
end
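For context on the `public_id`/`private_id` split this class now relies on, here is a minimal sketch of the Rack 2.0.8+ behavior, runnable in a console with the bundled Rack. The `session:gitlab` namespace in the last line is illustrative of the two key shapes `rack_session_keys` cleans up, not a claim about the exact constant value:

```ruby
require 'rack/session/abstract/id' # defines Rack::Session::SessionId

sid = Rack::Session::SessionId.new('abc123')

sid.public_id  # => "abc123" (the value carried in the cookie)
sid.private_id # => "2::" + SHA-256 hex digest of "abc123" (the store key)

# Comparing digests instead of raw IDs is what makes `current?` above
# resistant to timing attacks: both sides are fixed-length hashes.
sid.private_id == Rack::Session::SessionId.new('abc123').private_id # => true

# rack_session_keys must delete both the legacy and the hashed key:
["session:gitlab:#{sid.public_id}", "session:gitlab:#{sid.private_id}"]
```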
@@ -2,17 +2,19 @@

# rubocop:disable Rails/ActiveRecordAliases
class WikiPage
+  include Gitlab::Utils::StrongMemoize
+
  PageChangedError = Class.new(StandardError)
  PageRenameError = Class.new(StandardError)
+  FrontMatterTooLong = Class.new(StandardError)

-  MAX_TITLE_BYTES = 245
-  MAX_DIRECTORY_BYTES = 255
-
  include ActiveModel::Validations
  include ActiveModel::Conversion
  include StaticModel
  extend ActiveModel::Naming

+  delegate :content, :front_matter, to: :parsed_content
+
  def self.primary_key
    'slug'
  end
@@ -114,8 +116,7 @@ class WikiPage
    @attributes[:title] = new_title
  end

-  # The raw content of this page.
-  def content
+  def raw_content
    @attributes[:content] ||= @page&.text_data
  end
@@ -238,7 +239,7 @@ class WikiPage
    save do
      wiki.update_page(
        @page,
-        content: content,
+        content: raw_content,
        format: format,
        message: attrs[:message],
        title: title
@@ -281,8 +282,10 @@ class WikiPage
  # Updates the current @attributes hash by merging a hash of params
  def update_attributes(attrs)
    attrs[:title] = process_title(attrs[:title]) if attrs[:title].present?
+    update_front_matter(attrs)

    attrs.slice!(:content, :format, :message, :title)
+    clear_memoization(:parsed_content) if attrs.has_key?(:content)

    @attributes.merge!(attrs)
  end
@@ -293,6 +296,28 @@ class WikiPage

  private

+  def serialize_front_matter(hash)
+    return '' unless hash.present?
+
+    YAML.dump(hash.transform_keys(&:to_s)) + "---\n"
+  end
+
+  def update_front_matter(attrs)
+    return unless Gitlab::WikiPages::FrontMatterParser.enabled?(project)
+    return unless attrs.has_key?(:front_matter)
+
+    fm_yaml = serialize_front_matter(attrs[:front_matter])
+    raise FrontMatterTooLong if fm_yaml.size > Gitlab::WikiPages::FrontMatterParser::MAX_FRONT_MATTER_LENGTH
+
+    attrs[:content] = fm_yaml + (attrs[:content].presence || content)
+  end
+
+  def parsed_content
+    strong_memoize(:parsed_content) do
+      Gitlab::WikiPages::FrontMatterParser.new(raw_content, project).parse
+    end
+  end
+
  # Process and format the title based on the user input.
  def process_title(title)
    return if title.blank?
@@ -339,14 +364,16 @@ class WikiPage
  def validate_path_limits
    *dirnames, title = @attributes[:title].split('/')

-    if title && title.bytesize > MAX_TITLE_BYTES
-      errors.add(:title, _("exceeds the limit of %{bytes} bytes") % { bytes: MAX_TITLE_BYTES })
+    if title && title.bytesize > Gitlab::WikiPages::MAX_TITLE_BYTES
+      errors.add(:title, _("exceeds the limit of %{bytes} bytes") % {
+        bytes: Gitlab::WikiPages::MAX_TITLE_BYTES
+      })
    end

-    invalid_dirnames = dirnames.select { |d| d.bytesize > MAX_DIRECTORY_BYTES }
+    invalid_dirnames = dirnames.select { |d| d.bytesize > Gitlab::WikiPages::MAX_DIRECTORY_BYTES }
    invalid_dirnames.each do |dirname|
      errors.add(:title, _('exceeds the limit of %{bytes} bytes for directory name "%{dirname}"') % {
-        bytes: MAX_DIRECTORY_BYTES,
+        bytes: Gitlab::WikiPages::MAX_DIRECTORY_BYTES,
        dirname: dirname
      })
    end
...
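To make the new `serialize_front_matter` helper concrete: `YAML.dump` already emits the opening `---` document marker, so only the closing delimiter needs to be appended before the page body. A quick sketch (the sample hash and body are hypothetical):

```ruby
require 'yaml'

front_matter = { title: 'Home', tags: %w[wiki docs] }

# Mirrors serialize_front_matter: stringify keys, dump, close the block.
fm_yaml = YAML.dump(front_matter.transform_keys(&:to_s)) + "---\n"

puts fm_yaml + "# Welcome\n"
# ---
# title: Home
# tags:
# - wiki
# - docs
# ---
# # Welcome
```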
@@ -29,7 +29,15 @@ module Groups
      group.chat_team&.remove_mattermost_team(current_user)

+      user_ids_for_project_authorizations_refresh = group.user_ids_for_project_authorizations
+
      group.destroy
+
+      UserProjectAccessChangedService
+        .new(user_ids_for_project_authorizations_refresh)
+        .execute(blocking: true)
+
+      group
    end

    # rubocop: enable CodeReuse/ActiveRecord
  end
...
---
title: Read metadata from Wiki front-matter
merge_request: 27706
author:
type: added
# frozen_string_literal: true

class ScheduleRecalculateProjectAuthorizationsThirdRun < ActiveRecord::Migration[5.1]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false
  MIGRATION = 'RecalculateProjectAuthorizationsWithMinMaxUserId'
  BATCH_SIZE = 2_500
  DELAY_INTERVAL = 2.minutes.to_i

  disable_ddl_transaction!

  class User < ActiveRecord::Base
    include ::EachBatch

    self.table_name = 'users'
  end

  def up
    say "Scheduling #{MIGRATION} jobs"

    queue_background_migration_jobs_by_range_at_intervals(User, MIGRATION, DELAY_INTERVAL, batch_size: BATCH_SIZE)
  end

  def down
  end
end
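For context, `queue_background_migration_jobs_by_range_at_intervals` (from `Gitlab::Database::MigrationHelpers`) walks the model in ID batches and schedules one background-migration job per ID range, spaced `DELAY_INTERVAL` apart. With the helper's internals simplified, it behaves roughly like this sketch (run inside the migration's `up`):

```ruby
# Simplified sketch of what the helper does for this migration: one
# background job per 2,500-user ID range, scheduled two minutes apart.
User.each_batch(of: BATCH_SIZE) do |batch, index|
  # Reduce each batch to its ID bounds; the worker recalculates
  # project authorizations for users in [min_id, max_id].
  range = batch.pluck(Arel.sql('MIN(id), MAX(id)')).first

  BackgroundMigrationWorker.perform_in(index * DELAY_INTERVAL, MIGRATION, range)
end
```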
@@ -12947,6 +12947,7 @@ COPY "schema_migrations" (version) FROM STDIN;
20200204070729
20200204113223
20200204113224
+20200204113225
20200204131054
20200204131831
20200205143231
...
@@ -2,6 +2,7 @@ Akismet
Alertmanager
Algolia
Ansible
+Anthos
API
approvers
Artifactory
@@ -118,6 +119,8 @@ hardcode
hardcoded
hardcodes
Helm
+Heroku
+Herokuish
HipChat
hostname
hostnames
@@ -133,6 +136,7 @@ initializers
interdependencies
interdependency
Irker
+Istio
jasmine-jquery
JavaScript
Jaeger
@@ -180,6 +184,7 @@ misconfiguring
mitigations
mockup
mockups
+ModSecurity
nameserver
nameservers
namespace
@@ -326,6 +331,10 @@ unchecking
unchecks
uncomment
uncommented
+unencode
+unencoded
+unencoder
+unencodes
unencrypted
Unicorn
unindexed
...
@@ -22,10 +22,9 @@ Pipelines comprise:
Jobs are executed by [Runners](../runners/README.md). Multiple jobs in the same stage are executed in parallel,
if there are enough concurrent runners.

-If all the jobs in a stage:
-
-- Succeed, the pipeline moves on to the next stage.
-- Fail, the next stage is not (usually) executed and the pipeline ends early.
+If *all* jobs in a stage succeed, the pipeline moves on to the next stage.
+
+If *any* job in a stage fails, the next stage is not (usually) executed and the pipeline ends early.

In general, pipelines are executed automatically and require no intervention once created. However, there are
also times when you can manually interact with a pipeline.
@@ -46,6 +45,10 @@ you may need to enable pipeline triggering in your project's
Pipelines can be configured in many different ways:

+- [Basic pipelines](pipeline_architectures.md#basic-pipelines) run everything in each stage concurrently,
+  followed by the next stage.
+- [Directed Acyclic Graph Pipeline (DAG) pipelines](../directed_acyclic_graph/index.md) are based on relationships
+  between jobs and can run more quickly than basic pipelines.
- [Multi-project pipelines](../multi_project_pipelines.md) combine pipelines for different projects together.
- [Parent-Child pipelines](../parent_child_pipelines.md) break down complex pipelines
  into one parent pipeline that can trigger multiple child sub-pipelines, which all
...
# Customizing Auto DevOps

-While Auto DevOps provides great defaults to get you started, you can customize
-almost everything to fit your needs; from custom [buildpacks](#custom-buildpacks),
-to [`Dockerfile`s](#custom-dockerfile), [Helm charts](#custom-helm-chart), or
-even copying the complete [CI/CD configuration](#customizing-gitlab-ciyml)
-into your project to enable staging and canary deployments, and more.
+While [Auto DevOps](index.md) provides great defaults to get you started, you can customize
+almost everything to fit your needs. Auto DevOps offers everything from custom
+[buildpacks](#custom-buildpacks), to [`Dockerfile`s](#custom-dockerfile), and
+[Helm charts](#custom-helm-chart). You can even copy the complete
+[CI/CD configuration](#customizing-gitlab-ciyml) into your project to enable
+staging and canary deployments, and more.

## Custom buildpacks

If the automatic buildpack detection fails for your project, or if you want to
-use a custom buildpack, you can override the buildpack(s) using a project variable
+use a custom buildpack, you can override the buildpack using a project variable
or a `.buildpacks` file in your project:

- **Project variable** - Create a project variable `BUILDPACK_URL` with the URL
  of the buildpack to use.
-- **`.buildpacks` file** - Add a file in your project's repo called `.buildpacks`
+- **`.buildpacks` file** - Add a file in your project's repository called `.buildpacks`,
  and add the URL of the buildpack to use on a line in the file. If you want to
-  use multiple buildpacks, you can enter them in, one on each line.
+  use multiple buildpacks, enter one buildpack per line.

The buildpack URL can point to either a Git repository URL or a tarball URL.
-For Git repositories, it is possible to point to a specific Git reference (for example,
+For Git repositories, you can point to a specific Git reference (such as
commit SHA, tag name, or branch name) by appending `#<ref>` to the Git repository URL.
For example:
@@ -29,11 +30,10 @@ For example:

### Multiple buildpacks

-Using multiple buildpacks isn't fully supported by Auto DevOps because, when using the `.buildpacks`
-file, Auto Test will not work.
-
-The buildpack [heroku-buildpack-multi](https://github.com/heroku/heroku-buildpack-multi/),
-which is used under the hood to parse the `.buildpacks` file, doesn't provide the necessary commands
+Using multiple buildpacks is not fully supported by Auto DevOps, because Auto Test
+won't work when using the `.buildpacks` file. The buildpack
+[heroku-buildpack-multi](https://github.com/heroku/heroku-buildpack-multi/), used
+in the backend to parse the `.buildpacks` file, does not provide the necessary commands
`bin/test-compile` and `bin/test`.

If your goal is to use only a single custom buildpack, you should provide the project variable
@@ -41,18 +41,16 @@ If your goal is to use only a single custom buildpack, you should provide the pr

## Custom `Dockerfile`

-If your project has a `Dockerfile` in the root of the project repo, Auto DevOps
-will build a Docker image based on the Dockerfile rather than using buildpacks.
+If your project has a `Dockerfile` in the root of the project repository, Auto DevOps
+builds a Docker image based on the Dockerfile, rather than using buildpacks.
This can be much faster and result in smaller images, especially if your
Dockerfile is based on [Alpine](https://hub.docker.com/_/alpine/).

## Passing arguments to `docker build`

Arguments can be passed to the `docker build` command using the
-`AUTO_DEVOPS_BUILD_IMAGE_EXTRA_ARGS` project variable.
-
-For example, to build a Docker image based on based on the `ruby:alpine`
-instead of the default `ruby:latest`:
+`AUTO_DEVOPS_BUILD_IMAGE_EXTRA_ARGS` project variable. For example, to build a
+Docker image based on the `ruby:alpine` instead of the default `ruby:latest`:

1. Set `AUTO_DEVOPS_BUILD_IMAGE_EXTRA_ARGS` to `--build-arg=RUBY_VERSION=alpine`.
1. Add the following to a custom `Dockerfile`:
@@ -65,25 +63,28 @@ instead of the default `ruby:latest`:
   ```

NOTE: **Note:**
-Passing in complex values (newlines and spaces, for example) will likely
-cause escaping issues due to the way this argument is used in Auto DevOps.
-Consider using Base64 encoding of such values to avoid this problem.
+Use Base64 encoding if you need to pass complex values, such as newlines and
+spaces. Left unencoded, complex values like these can cause escaping issues
+due to how Auto DevOps uses the arguments.

CAUTION: **Warning:**
Avoid passing secrets as Docker build arguments if possible, as they may be
persisted in your image. See
-[this discussion](https://github.com/moby/moby/issues/13490) for details.
+[this discussion of best practices with secrets](https://github.com/moby/moby/issues/13490) for details.

## Passing secrets to `docker build`

> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/25514) in GitLab 12.3, but available in versions 11.9 and above.

-CI environment variables can be passed as [build
-secrets](https://docs.docker.com/develop/develop-images/build_enhancements/#new-docker-build-secret-information) to the `docker build` command by listing them comma separated by name in the
-`AUTO_DEVOPS_BUILD_IMAGE_FORWARDED_CI_VARIABLES` variable. For example, in order to forward the variables `CI_COMMIT_SHA` and `CI_ENVIRONMENT_NAME`, one would set `AUTO_DEVOPS_BUILD_IMAGE_FORWARDED_CI_VARIABLES` to `CI_COMMIT_SHA,CI_ENVIRONMENT_NAME`.
+CI environment variables can be passed as
+[build secrets](https://docs.docker.com/develop/develop-images/build_enhancements/#new-docker-build-secret-information) to the `docker build` command by listing them
+by name, comma-separated, in the `AUTO_DEVOPS_BUILD_IMAGE_FORWARDED_CI_VARIABLES`
+variable. For example, to forward the variables `CI_COMMIT_SHA` and `CI_ENVIRONMENT_NAME`,
+set `AUTO_DEVOPS_BUILD_IMAGE_FORWARDED_CI_VARIABLES` to `CI_COMMIT_SHA,CI_ENVIRONMENT_NAME`.

-Unlike build arguments, these are not persisted by Docker in the final image
-(though you can still persist them yourself, so **be careful**).
+CAUTION: **Caution:**
+Unlike build arguments, these variables are not persisted by Docker in the final image,
+though you can still persist them yourself.

In projects:
@@ -91,7 +92,7 @@ In projects:
  variables.
- With a `Dockerfile`, the following is required:

-  1. Activate the experimental `Dockerfile` syntax by adding the following
+  1. Activate the experimental `Dockerfile` syntax by adding the following code
     to the top of the file:

     ```dockerfile
@@ -114,30 +115,33 @@ feature to use the `--secret` flag.

Auto DevOps uses [Helm](https://helm.sh/) to deploy your application to Kubernetes.
You can override the Helm chart used by bundling up a chart into your project
-repo or by specifying a project variable:
+repository or by specifying a project variable:

- **Bundled chart** - If your project has a `./chart` directory with a `Chart.yaml`
-  file in it, Auto DevOps will detect the chart and use it instead of the [default
-  one](https://gitlab.com/gitlab-org/charts/auto-deploy-app).
-  This can be a great way to control exactly how your application is deployed.
+  file in it, Auto DevOps will detect the chart and use it instead of the
+  [default chart](https://gitlab.com/gitlab-org/charts/auto-deploy-app), enabling
+  you to control exactly how your application is deployed.
- **Project variable** - Create a [project variable](../../ci/variables/README.md#gitlab-cicd-environment-variables)
-  `AUTO_DEVOPS_CHART` with the URL of a custom chart to use or create two project variables `AUTO_DEVOPS_CHART_REPOSITORY` with the URL of a custom chart repository and `AUTO_DEVOPS_CHART` with the path to the chart.
+  `AUTO_DEVOPS_CHART` with the URL of a custom chart to use, or create two project
+  variables: `AUTO_DEVOPS_CHART_REPOSITORY` with the URL of a custom chart repository,
+  and `AUTO_DEVOPS_CHART` with the path to the chart.

## Customize values for Helm Chart

> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/30628) in GitLab 12.6, `.gitlab/auto-deploy-values.yaml` will be used by default for Helm upgrades.

-You can override the default values in the `values.yaml` file in the [default Helm chart](https://gitlab.com/gitlab-org/charts/auto-deploy-app).
-This can be achieved by either:
+You can override the default values in the `values.yaml` file in the
+[default Helm chart](https://gitlab.com/gitlab-org/charts/auto-deploy-app) by either:

-- Adding a file named `.gitlab/auto-deploy-values.yaml` to your repository. It will
-  be automatically used if found.
-- Adding a file with a different name or path to the repository, and set the
-  `HELM_UPGRADE_VALUES_FILE` [environment variable](#environment-variables) with the path and name.
+- Adding a file named `.gitlab/auto-deploy-values.yaml` to your repository, which is
+  automatically used, if found.
+- Adding a file with a different name or path to the repository, and setting the
+  `HELM_UPGRADE_VALUES_FILE` [environment variable](#environment-variables) with
+  the path and name.

NOTE: **Note:**
-For GitLab 12.5 and earlier, the `HELM_UPGRADE_EXTRA_ARGS` environment variable can be used to override the default chart values.
-To do so, set `HELM_UPGRADE_EXTRA_ARGS` to `--values my-values.yaml`.
+For GitLab 12.5 and earlier, use the `HELM_UPGRADE_EXTRA_ARGS` environment variable
+to override the default chart values by setting `HELM_UPGRADE_EXTRA_ARGS` to `--values <my-values.yaml>`.

## Custom Helm chart per environment
@@ -146,34 +150,34 @@ to the desired environment. See [Limiting environment scopes of variables](../..

## Customizing `.gitlab-ci.yml`

-Auto DevOps is completely customizable because the [Auto DevOps template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml):
-
-- Is just an implementation of a [`.gitlab-ci.yml`](../../ci/yaml/README.md) file.
-- Uses only features available to any implementation of `.gitlab-ci.yml`.
-
-If you want to modify the CI/CD pipeline used by Auto DevOps, you can [`include`
-the template](../../ci/yaml/README.md#includetemplate) and customize as
-needed. To do this, add a `.gitlab-ci.yml` file to the root of your repository
+Auto DevOps is completely customizable because the
+[Auto DevOps template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml)
+is just an implementation of a [`.gitlab-ci.yml`](../../ci/yaml/README.md) file,
+and uses only features available to any implementation of `.gitlab-ci.yml`.
+
+To modify the CI/CD pipeline used by Auto DevOps,
+[`include` the template](../../ci/yaml/README.md#includetemplate), and customize
+it as needed by adding a `.gitlab-ci.yml` file to the root of your repository
containing the following:

-```yml
+```yaml
include:
  - template: Auto-DevOps.gitlab-ci.yml
```

-Then add any extra changes you want. Your additions will be merged with the
-[Auto DevOps template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml) using the behaviour described for
-[`include`](../../ci/yaml/README.md#include).
+Add your changes, and your additions will be merged with the
+[Auto DevOps template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml)
+using the behavior described for [`include`](../../ci/yaml/README.md#include).

-It is also possible to copy and paste the contents of the [Auto DevOps template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml)
-into your project and edit this as needed. You may prefer to do it
-that way if you want to specifically remove any part of it.
+If you need to specifically remove a part of the file, you can also copy and paste the contents of the
+[Auto DevOps template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml)
+into your project and edit it as needed.

## Customizing the Kubernetes namespace

> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/27630) in GitLab 12.6.

-For **non**-GitLab-managed clusters, the namespace can be customized using
+For clusters not managed by GitLab, you can customize the namespace in
`.gitlab-ci.yml` by specifying
[`environment:kubernetes:namespace`](../../ci/environments.md#configuring-kubernetes-deployments).

For example, the following configuration overrides the namespace used for
@@ -212,15 +216,14 @@ include:
  - template: Jobs/Build.gitlab-ci.yml
```

-Consult the [Auto DevOps template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml) for information on available jobs.
+See the [Auto DevOps template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml) for information on available jobs.

## PostgreSQL database support

-In order to support applications that require a database,
+To support applications requiring a database,
[PostgreSQL](https://www.postgresql.org/) is provisioned by default. The credentials to access
the database are preconfigured, but can be customized by setting the associated
-[variables](#environment-variables). These credentials can be used for defining a
-`DATABASE_URL` of the format:
+[variables](#environment-variables). You can use these credentials to define a `DATABASE_URL`:

```yaml
postgres://user:password@postgres-host:postgres-port/postgres-database
@@ -230,7 +233,7 @@ postgres://user:password@postgres-host:postgres-port/postgres-database

CAUTION: **Deprecation**
The variable `AUTO_DEVOPS_POSTGRES_CHANNEL` that controls default provisioned
-PostgreSQL currently defaults to `1`. This is scheduled to change to `2` in
+PostgreSQL currently defaults to `1`. This value is scheduled to change to `2` in
[GitLab 13.0](https://gitlab.com/gitlab-org/gitlab/-/issues/210499).

The version of the chart used to provision PostgreSQL:
@@ -250,19 +253,18 @@ To use the new PostgreSQL:

### Using external PostgreSQL database providers

While Auto DevOps provides out-of-the-box support for a PostgreSQL container for
-production environments, for some use-cases it may not be sufficiently secure or
-resilient and you may wish to use an external managed provider for PostgreSQL.
-For example, AWS Relational Database Service.
+production environments, for some use cases, it may not be sufficiently secure or
+resilient, and you may want to use an external managed provider (such as
+AWS Relational Database Service) for PostgreSQL.

-You will need to define environment-scoped variables for `POSTGRES_ENABLED` and `DATABASE_URL` in your project's CI/CD settings.
-
-To achieve this:
+You must define environment-scoped variables for `POSTGRES_ENABLED` and
+`DATABASE_URL` in your project's CI/CD settings:

1. Disable the built-in PostgreSQL installation for the required environments using
   scoped [environment variables](../../ci/environments.md#scoping-environments-with-specs).
   For this use case, it's likely that only `production` will need to be added to this
-   list as the builtin PostgreSQL setup for Review Apps and staging will be sufficient
-   as a high availability setup is not required.
+   list. The built-in PostgreSQL setup for Review Apps and staging is sufficient,
+   because a high availability setup is not required.

   ![Auto Metrics](img/disable_postgres.png)
@@ -273,14 +275,14 @@ To achieve this:
   postgres://user:password@postgres-host:postgres-port/postgres-database
   ```

-You will need to ensure that your Kubernetes cluster has network access to wherever
+You must ensure that your Kubernetes cluster has network access to wherever
PostgreSQL is hosted.

## Environment variables

The following variables can be used for setting up the Auto DevOps domain,
providing a custom Helm chart, or scaling your application. PostgreSQL can
-also be customized, and you can easily use a [custom buildpack](#custom-buildpacks).
+also be customized, and you can use a [custom buildpack](#custom-buildpacks).

### Build and deployment
@@ -292,34 +294,34 @@ applications.
| `ADDITIONAL_HOSTS` | Fully qualified domain names specified as a comma-separated list that are added to the Ingress hosts. |
| `<ENVIRONMENT>_ADDITIONAL_HOSTS` | For a specific environment, the fully qualified domain names specified as a comma-separated list that are added to the Ingress hosts. This takes precedence over `ADDITIONAL_HOSTS`. |
| `AUTO_DEVOPS_BUILD_IMAGE_CNB_ENABLED` | When set to a non-empty value and no `Dockerfile` is present, Auto Build builds your application using Cloud Native Buildpacks instead of Herokuish. [More details](stages.md#auto-build-using-cloud-native-buildpacks-beta). |
-| `AUTO_DEVOPS_BUILD_IMAGE_EXTRA_ARGS` | Extra arguments to be passed to the `docker build` command. Note that using quotes will not prevent word splitting. [More details](#passing-arguments-to-docker-build). |
+| `AUTO_DEVOPS_BUILD_IMAGE_EXTRA_ARGS` | Extra arguments to be passed to the `docker build` command. Note that using quotes won't prevent word splitting. [More details](#passing-arguments-to-docker-build). |
| `AUTO_DEVOPS_BUILD_IMAGE_FORWARDED_CI_VARIABLES` | A [comma-separated list of CI variable names](#passing-secrets-to-docker-build) to be passed to the `docker build` command as secrets. |
| `AUTO_DEVOPS_CHART` | Helm Chart used to deploy your apps. Defaults to the one [provided by GitLab](https://gitlab.com/gitlab-org/charts/auto-deploy-app). |
| `AUTO_DEVOPS_CHART_REPOSITORY` | Helm Chart repository used to search for charts. Defaults to `https://charts.gitlab.io`. |
| `AUTO_DEVOPS_CHART_REPOSITORY_NAME` | From GitLab 11.11, used to set the name of the Helm repository. Defaults to `gitlab`. |
| `AUTO_DEVOPS_CHART_REPOSITORY_USERNAME` | From GitLab 11.11, used to set a username to connect to the Helm repository. Defaults to no credentials. Also set `AUTO_DEVOPS_CHART_REPOSITORY_PASSWORD`. |
| `AUTO_DEVOPS_CHART_REPOSITORY_PASSWORD` | From GitLab 11.11, used to set a password to connect to the Helm repository. Defaults to no credentials. Also set `AUTO_DEVOPS_CHART_REPOSITORY_USERNAME`. |
-| `AUTO_DEVOPS_MODSECURITY_SEC_RULE_ENGINE` | From GitLab 12.5, used in combination with [Modsecurity feature flag](../../user/clusters/applications.md#web-application-firewall-modsecurity) to toggle [Modsecurity's `SecRuleEngine`](https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-(v2.x)#SecRuleEngine) behavior. Defaults to `DetectionOnly`. |
+| `AUTO_DEVOPS_MODSECURITY_SEC_RULE_ENGINE` | From GitLab 12.5, used in combination with [ModSecurity feature flag](../../user/clusters/applications.md#web-application-firewall-modsecurity) to toggle [ModSecurity's `SecRuleEngine`](https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-(v2.x)#SecRuleEngine) behavior. Defaults to `DetectionOnly`. |
| `BUILDPACK_URL` | Buildpack's full URL. Can point to either [a Git repository URL or a tarball URL](#custom-buildpacks). |
| `CANARY_ENABLED` | From GitLab 11.0, used to define a [deploy policy for canary environments](#deploy-policy-for-canary-environments-premium). |
| `CANARY_PRODUCTION_REPLICAS` | Number of canary replicas to deploy for [Canary Deployments](../../user/project/canary_deployments.md) in the production environment. Takes precedence over `CANARY_REPLICAS`. Defaults to 1. |
| `CANARY_REPLICAS` | Number of canary replicas to deploy for [Canary Deployments](../../user/project/canary_deployments.md). Defaults to 1. |
| `HELM_RELEASE_NAME` | From GitLab 12.1, allows the `helm` release name to be overridden. Can be used to assign unique release names when deploying multiple projects to a single namespace. |
| `HELM_UPGRADE_VALUES_FILE` | From GitLab 12.6, allows the `helm upgrade` values file to be overridden. Defaults to `.gitlab/auto-deploy-values.yaml`. |
-| `HELM_UPGRADE_EXTRA_ARGS` | From GitLab 11.11, allows extra arguments in `helm` commands when deploying the application. Note that using quotes will not prevent word splitting. **Tip:** you can use this variable to [customize the Auto Deploy Helm chart](#custom-helm-chart) by applying custom override values with `--values my-values.yaml`. |
+| `HELM_UPGRADE_EXTRA_ARGS` | From GitLab 11.11, allows extra arguments in `helm` commands when deploying the application. Note that using quotes won't prevent word splitting. **Tip:** you can use this variable to [customize the Auto Deploy Helm chart](#custom-helm-chart) by applying custom override values with `--values my-values.yaml`. |
| `INCREMENTAL_ROLLOUT_MODE` | From GitLab 11.4, if present, can be used to enable an [incremental rollout](#incremental-rollout-to-production-premium) of your application for the production environment. Set to `manual` for manual deployment jobs or `timed` for automatic rollout deployments with a 5 minute delay each one. |
| `K8S_SECRET_*` | From GitLab 11.7, any variable prefixed with [`K8S_SECRET_`](#application-secret-variables) will be made available by Auto DevOps as environment variables to the deployed application. |
| `KUBE_INGRESS_BASE_DOMAIN` | From GitLab 11.8, can be used to set a domain per cluster. See [cluster domains](../../user/project/clusters/index.md#base-domain) for more information. |
| `PRODUCTION_REPLICAS` | Number of replicas to deploy in the production environment. Takes precedence over `REPLICAS` and defaults to 1. For zero downtime upgrades, set to 2 or greater. |
| `REPLICAS` | Number of replicas to deploy. Defaults to 1. |
| `ROLLOUT_RESOURCE_TYPE` | From GitLab 11.9, allows specification of the resource type being deployed when using a custom Helm chart. Default value is `deployment`. |
-| `ROLLOUT_STATUS_DISABLED` | From GitLab 12.0, used to disable rollout status check because it doesn't support all resource types, for example, `cronjob`. |
+| `ROLLOUT_STATUS_DISABLED` | From GitLab 12.0, used to disable rollout status check because it does not support all resource types, for example, `cronjob`. |
| `STAGING_ENABLED` | From GitLab 10.8, used to define a [deploy policy for staging and production environments](#deploy-policy-for-staging-and-production-environments). |

TIP: **Tip:**
Set up the replica variables using a
[project variable](../../ci/variables/README.md#gitlab-cicd-environment-variables)
-and scale your application by just redeploying it!
+and scale your application by only redeploying it.

CAUTION: **Caution:**
You should *not* scale your application using Kubernetes directly. This can
@@ -334,7 +336,7 @@ The following table lists variables related to the database.
|-----------------------------------------|------------------------------------|
| `DB_INITIALIZE` | From GitLab 11.4, used to specify the command to run to initialize the application's PostgreSQL database. Runs inside the application pod. |
| `DB_MIGRATE` | From GitLab 11.4, used to specify the command to run to migrate the application's PostgreSQL database. Runs inside the application pod. |
-| `POSTGRES_ENABLED` | Whether PostgreSQL is enabled. Defaults to `"true"`. Set to `false` to disable the automatic deployment of PostgreSQL. |
+| `POSTGRES_ENABLED` | Whether PostgreSQL is enabled. Defaults to `true`. Set to `false` to disable the automatic deployment of PostgreSQL. |
| `POSTGRES_USER` | The PostgreSQL user. Defaults to `user`. Set it to use a custom username. |
| `POSTGRES_PASSWORD` | The PostgreSQL password. Defaults to `testing-password`. Set it to use a custom password. |
| `POSTGRES_DB` | The PostgreSQL database name. Defaults to the value of [`$CI_ENVIRONMENT_SLUG`](../../ci/variables/README.md#predefined-environment-variables). Set it to use a custom database name. |
@@ -354,35 +356,34 @@ The following table lists variables used to disable jobs.
| **Variable** | **Description** |
|-----------------------------------------|------------------------------------|
-| `CODE_QUALITY_DISABLED` | From GitLab 11.0, used to disable the `codequality` job. If the variable is present, the job will not be created. |
-| `CONTAINER_SCANNING_DISABLED` | From GitLab 11.0, used to disable the `sast:container` job. If the variable is present, the job will not be created. |
-| `DAST_DISABLED` | From GitLab 11.0, used to disable the `dast` job. If the variable is present, the job will not be created. |
-| `DEPENDENCY_SCANNING_DISABLED` | From GitLab 11.0, used to disable the `dependency_scanning` job. If the variable is present, the job will not be created. |
-| `LICENSE_MANAGEMENT_DISABLED` | From GitLab 11.0, used to disable the `license_management` job. If the variable is present, the job will not be created. |
-| `PERFORMANCE_DISABLED` | From GitLab 11.0, used to disable the `performance` job. If the variable is present, the job will not be created. |
-| `REVIEW_DISABLED` | From GitLab 11.0, used to disable the `review` and the manual `review:stop` job. If the variable is present, these jobs will not be created. |
-| `SAST_DISABLED` | From GitLab 11.0, used to disable the `sast` job. If the variable is present, the job will not be created. |
-| `TEST_DISABLED` | From GitLab 11.0, used to disable the `test` job. If the variable is present, the job will not be created. |
+| `CODE_QUALITY_DISABLED` | From GitLab 11.0, used to disable the `codequality` job. If the variable is present, the job won't be created. |
+| `CONTAINER_SCANNING_DISABLED` | From GitLab 11.0, used to disable the `sast:container` job. If the variable is present, the job won't be created. |
+| `DAST_DISABLED` | From GitLab 11.0, used to disable the `dast` job. If the variable is present, the job won't be created. |
+| `DEPENDENCY_SCANNING_DISABLED` | From GitLab 11.0, used to disable the `dependency_scanning` job. If the variable is present, the job won't be created. |
+| `LICENSE_MANAGEMENT_DISABLED` | From GitLab 11.0, used to disable the `license_management` job. If the variable is present, the job won't be created. |
+| `PERFORMANCE_DISABLED` | From GitLab 11.0, used to disable the `performance` job. If the variable is present, the job won't be created. |
+| `REVIEW_DISABLED` | From GitLab 11.0, used to disable the `review` and the manual `review:stop` job. If the variable is present, these jobs won't be created. |
+| `SAST_DISABLED` | From GitLab 11.0, used to disable the `sast` job. If the variable is present, the job won't be created. |
+| `TEST_DISABLED` | From GitLab 11.0, used to disable the `test` job. If the variable is present, the job won't be created. |
### Application secret variables

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/49056) in GitLab 11.7.

Some applications need to define secret variables that are accessible by the deployed
application. Auto DevOps detects variables starting with `K8S_SECRET_`, and makes
these prefixed variables available to the deployed application as environment variables.

To configure your application variables:

1. Go to your project's **{settings}** **Settings > CI/CD**, then expand the
   **Variables** section.
1. Create a CI/CD variable, ensuring the key is prefixed with
   `K8S_SECRET_`. For example, you can create a variable with key
   `K8S_SECRET_RAILS_MASTER_KEY`.
1. Run an Auto DevOps pipeline, either by manually creating a new
   pipeline or by pushing a code change to GitLab.
Auto DevOps pipelines will take your application secret variables to

@@ -394,6 +395,7 @@

example above, you can see the secret below containing the
```shell
$ kubectl get secret production-secret -n minimal-ruby-app-54 -o yaml
apiVersion: v1
data:
  RAILS_MASTER_KEY: MTIzNC10ZXN0
...
metadata:
...
type: Opaque
```

(The value is Base64-encoded, as is standard for Kubernetes secrets: `MTIzNC10ZXN0`
decodes to `1234-test`.)
Environment variables are generally considered immutable in a Kubernetes pod.
If you update an application secret without changing any code, then manually
create a new pipeline, you will find any running application pods won't have
the updated secrets. To update the secrets, either:

- Push a code update to GitLab to force the Kubernetes deployment to recreate pods.
- Manually delete running pods to cause Kubernetes to create new pods with updated
  secrets.

NOTE: **Note:**
Variables with multi-line values are not currently supported due to
limitations with the current Auto DevOps scripting environment.
### Advanced replica variables setup

Apart from the two replica-related variables for production mentioned above,
you can also use other variables for different environments.

The Kubernetes label named `track`, GitLab CI/CD environment names, and the
replicas environment variable are combined into the format `TRACK_ENV_REPLICAS`,
enabling you to define your own variables for scaling the pod's replicas:
- `TRACK`: The capitalized value of the `track`
  [Kubernetes label](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
  in the Helm Chart app definition. If not set, it isn't included in
  the variable name.
- `ENV`: The capitalized environment name of the deploy job, set in
  `.gitlab-ci.yml`.
In the example below, the environment's name is `qa`, and it deploys the track
`foo`, which results in an environment variable named `FOO_QA_REPLICAS`:

```yaml
QA testing:
  ...
  - deploy foo
```
The track `foo` being referenced must also be defined in the application's Helm chart, like:

```yaml
replicaCount: 1
...
```

@@ -482,30 +480,29 @@
TIP: **Tip:**
You can also set this inside your [project's settings](index.md#deployment-strategy).

The normal behavior of Auto DevOps is to use continuous deployment, pushing
automatically to the `production` environment every time a new pipeline is run
on the default branch. However, there are cases where you might want to use a
staging environment, and deploy to production manually. For this scenario, the
`STAGING_ENABLED` environment variable was introduced.
If you define `STAGING_ENABLED`, such as setting `STAGING_ENABLED` to
`1` as a CI/CD variable, then GitLab automatically deploys the application
to a `staging` environment, and creates a `production_manual` job for
you when you're ready to manually deploy to production.
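For example, a minimal sketch of enabling this from `.gitlab-ci.yml` (the
variable can equally be set in the project's CI/CD settings):

```yaml
variables:
  STAGING_ENABLED: "1"
```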
### Deploy policy for canary environments **(PREMIUM)**

> [Introduced](https://gitlab.com/gitlab-org/gitlab-ci-yml/-/merge_requests/171) in GitLab 11.0.

You can use a [canary environment](../../user/project/canary_deployments.md) before
deploying any changes to production.

If you define `CANARY_ENABLED` in your project, such as setting `CANARY_ENABLED` to
`1` as a CI/CD variable, then two manual jobs are created:

- `canary` - Deploys the application to the canary environment.
- `production_manual` - Manually deploys the application to production.
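A minimal sketch of enabling canary deployments the same way:

```yaml
variables:
  CANARY_ENABLED: "1"
```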
### Incremental rollout to production **(PREMIUM)**

@@ -514,10 +511,9 @@

TIP: **Tip:**
You can also set this inside your [project's settings](index.md#deployment-strategy).

When you're ready to deploy a new version of your app to production, you may want
to use an incremental rollout to replace just a few pods with the latest code to
check how the application is behaving before manually increasing the rollout up to 100%.
If `INCREMENTAL_ROLLOUT_MODE` is set to `manual` in your project, then instead
of the standard `production` job, 4 different manual jobs
will be created:

1. `rollout 10%`
1. `rollout 25%`
1. `rollout 50%`
1. `rollout 100%`
The percentage is based on the `REPLICAS` variable, and defines the number of
pods you want to have for your deployment. If the value is `10`, and you run the
`10%` rollout job, there will be `1` new pod + `9` old ones.
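Putting this together, a sketch of a configuration enabling manual incremental
rollouts over ten replicas (the variable names are the documented ones; the
values are illustrative):

```yaml
variables:
  INCREMENTAL_ROLLOUT_MODE: "manual"
  REPLICAS: "10"   # the 10% job then replaces 1 of the 10 pods
```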
To start a job, click the play icon (**{play}**) next to the job's name. You're not
required to go from `10%` to `100%`; you can jump to whatever job you want.
You can also scale down by running a lower percentage job, just before hitting
`100%`. Once you get to `100%`, you can't scale down, and you'd have to roll
back by redeploying the old version using the
[rollback button](../../ci/environments.md#retrying-and-rolling-back) in the
environment page.
@@ -561,9 +557,9 @@

With `INCREMENTAL_ROLLOUT_MODE` set to `manual` and with `STAGING_ENABLED`

![Rollout and staging enabled](img/rollout_staging_enabled.png)

CAUTION: **Caution:**
Before GitLab 11.4, the presence of the `INCREMENTAL_ROLLOUT_ENABLED` environment
variable enabled this feature. This configuration is deprecated, and will be
removed in the future.
### Timed incremental rollout to production **(PREMIUM)**

@@ -577,8 +573,10 @@

This configuration is based on
[incremental rollout to production](#incremental-rollout-to-production-premium).

Everything behaves the same way, except:

- To enable it, set the `INCREMENTAL_ROLLOUT_MODE` variable to `timed`.
- Instead of the standard `production` job, the following jobs are created with
  a 5-minute delay between each:

  1. `timed rollout 10%`
  1. `timed rollout 25%`
  1. `timed rollout 50%`
@@ -586,15 +584,15 @@

## Auto DevOps banner

The following Auto DevOps banner displays for users with Maintainer or greater
permissions on new projects when Auto DevOps is not enabled:

![Auto DevOps banner](img/autodevops_banner_v12_6.png)

The banner can be disabled for:

- A user, when they dismiss it themselves.
- A project, by explicitly [disabling Auto DevOps](index.md#enablingdisabling-auto-devops).
- An entire GitLab instance:
  - By an administrator running the following in a Rails console:
...

@@ -418,12 +418,23 @@

## Troubleshooting
- Auto Build and Auto Test may fail to detect your language or framework with the
  following error:

  ```plaintext
  Step 5/11 : RUN /bin/herokuish buildpack build
   ---> Running in eb468cd46085
      -----> Unable to select a buildpack
  The command '/bin/sh -c /bin/herokuish buildpack build' returned a non-zero code: 1
  ```

  The following are possible reasons:

  - Your application may be missing the key files the buildpack is looking for. For
    example, for Ruby applications you must have a `Gemfile` to be properly detected,
    even though it is possible to write a Ruby app without a `Gemfile`.
  - There may be no buildpack for your application. Try specifying a
    [custom buildpack](customize.md#custom-buildpacks), as shown in the sketch below.
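    For instance, a minimal sketch of pointing Auto DevOps at an explicit
    buildpack with the `BUILDPACK_URL` variable (the Heroku Ruby buildpack URL
    here is only illustrative):

    ```yaml
    variables:
      BUILDPACK_URL: "https://github.com/heroku/heroku-buildpack-ruby"
    ```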
- Auto Test may fail because of a mismatch between testing frameworks. In this
  case, you may need to customize your `.gitlab-ci.yml` with your test commands.
- Auto Deploy will fail if GitLab cannot create a Kubernetes namespace and

...
# Getting started with Auto DevOps

This step-by-step guide will help you use [Auto DevOps](index.md) to
deploy a project hosted on GitLab.com to Google Kubernetes Engine.

You will use GitLab's native Kubernetes integration, so you won't need
to create a Kubernetes cluster manually using the Google Cloud Platform console.
You will create and deploy a simple application that you create from a GitLab template.

These instructions will also work for a self-managed GitLab instance; you'll just
need to ensure your own [Runners are configured](../../ci/runners/README.md) and
[Google OAuth is enabled](../../integration/google.md).
## Configure your Google account

Before creating and connecting your Kubernetes cluster to your GitLab project,
you need a [Google Cloud Platform account](https://console.cloud.google.com).
Sign in with an existing Google account, such as the one you use to access Gmail
or Google Drive, or create a new one.

1. Follow the steps described in the ["Before you begin" section](https://cloud.google.com/kubernetes-engine/docs/quickstart#before-you-begin)
   of the Kubernetes Engine docs to enable the required APIs and related services.
1. Ensure you've created a [billing account](https://cloud.google.com/billing/docs/how-to/manage-billing-account)
   with Google Cloud Platform.
TIP: **Tip:**
Every new Google Cloud Platform (GCP) account receives [$300 in credit](https://console.cloud.google.com/freetrial),
and in partnership with Google, GitLab is able to offer an additional $200 for new
GCP accounts to get started with GitLab's Google Kubernetes Engine Integration.
[Follow this link](https://cloud.google.com/partners/partnercredit/?pcn_code=0014M00001h35gDQAQ#contact-form)
and apply for credit.
## Create a new project from a template

We will use one of GitLab's project templates to get started. As the name suggests,
those projects provide a bare-bones application built on some well-known frameworks.

1. In GitLab, click the plus icon (**{plus-square}**) at the top of the navigation bar, and select
   **New project**.
1. Go to the **Create from template** tab, where you can choose among a Ruby on
   Rails, Spring, or NodeJS Express project.
   For this tutorial, use the Ruby on Rails template.

   ![Select project template](img/guide_project_template_v12_3.png)
@@ -48,196 +51,215 @@

1. Click **Create project**.

Now that you've created a project, you'll next create the Kubernetes cluster
to deploy this project to.
## Create a Kubernetes cluster from within GitLab

1. On your project's landing page, click **Add Kubernetes cluster**
   (note that this option is also available when you navigate to **{cloud-gear}** **Operations > Kubernetes**).

   ![Project landing page](img/guide_project_landing_page_v12_10.png)

1. On the **Add a Kubernetes cluster integration** page, click the **Create new cluster** tab,
   then click **Google GKE**.

   ![Google sign in](img/guide_google_signin_v12_3.png)
1. Connect with your Google account, and click **Allow** to allow access to your
   Google account. (This authorization request is only displayed the first time
   you connect GitLab with your Google account.)

   After authorizing access, the **Add a Kubernetes cluster integration** page
   is displayed.

1. In the **Enter the details for your Kubernetes cluster** section, provide
   details about your cluster:

   - **Kubernetes cluster name**
   - **Environment scope** - Leave this field unchanged.
   - **Google Cloud Platform project** - Select a project. When you
     [configured your Google account](#configure-your-google-account), a project
     should have already been created for you.
   - **Zone** - The [region/zone](https://cloud.google.com/compute/docs/regions-zones/) to
     create the cluster in.
   - **Number of nodes**
   - **Machine type** - For more information about
     [machine types](https://cloud.google.com/compute/docs/machine-types), see Google's documentation.
   - **Enable Cloud Run for Anthos** - Select this checkbox to use the Cloud Run,
     Istio, and HTTP Load Balancing add-ons for this cluster.
   - **GitLab-managed cluster** - Select this checkbox to
     [allow GitLab to manage namespace and service accounts](../../user/project/clusters/index.md#gitlab-managed-clusters) for this cluster.

1. Click **Create Kubernetes cluster**.
After a couple of minutes, the cluster will be created. You can also see its
status on your [GCP dashboard](https://console.cloud.google.com/kubernetes).

Next, you will install some applications on your cluster that are needed
to take full advantage of Auto DevOps.
## Install the package manager

After creating your Kubernetes cluster, GitLab's Kubernetes integration provides
[pre-defined applications](../../user/project/clusters/index.md#installing-applications)
for you to install. To install them, you must first install Helm Tiller, the
package manager for Kubernetes, to enable the installation of other applications.

Next to **Helm Tiller**, click **Install**.

![Cluster applications](img/guide_cluster_apps_v12_3.png)

After installation completes, the page reloads, and you can install other
applications.
## Install Ingress and Prometheus

After installing **Helm Tiller**, you can install other applications that rely on it,
including Ingress and Prometheus, which we will install in this quick start guide:

- Ingress - Provides load balancing, SSL termination, and name-based virtual hosting,
  using NGINX behind the scenes.
- Prometheus - An open-source monitoring and alerting system used to supervise the
  deployed application.

NOTE: **Note:**
We won't install GitLab Runner in this quick start guide, as this guide uses the
shared Runners provided by GitLab.com.

To install the applications:

- Click the **Install** button for **Ingress**.
- When the **Ingress Endpoint** is displayed, copy the IP address.
- Add your **Base domain**. For this guide, we will use the domain suggested by GitLab.
- Click **Save changes**.
![Cluster Base Domain](img/guide_base_domain_v12_3.png)

## Enable Auto DevOps (optional)

While Auto DevOps is enabled by default, Auto DevOps can be disabled at both
the instance level (for self-managed instances) and the group level. Complete
these steps to enable Auto DevOps if it's disabled:

1. Navigate to **{settings}** **Settings > CI/CD > Auto DevOps**, and click **Expand**.
1. Select **Default to Auto DevOps pipeline** to display more options.
1. In **Deployment strategy**, select your desired [continuous deployment strategy](index.md#deployment-strategy)
   to deploy the application to production after the pipeline successfully runs on the `master` branch.
1. Click **Save changes**.

![Auto DevOps settings](img/guide_enable_autodevops_v12_3.png)

After you save your changes, GitLab creates a new pipeline. To view it, go to
**{rocket}** **CI/CD > Pipelines**.

![First pipeline](img/guide_first_pipeline_v12_3.png)
In the next section, we explain what each job does in the pipeline.

## Deploy the application

When your pipeline runs, what is it doing?

To view the jobs in the pipeline, click the pipeline's status badge. The
**{status_running}** icon displays when pipeline jobs are running, and updates
without refreshing the page to **{status_success}** (for success) or
**{status_failed}** (for failure) when the jobs complete.
The jobs are separated into stages:

![Pipeline stages](img/guide_pipeline_stages_v12_3.png)

- **Build** - The application builds a Docker image and uploads it to your project's
  [Container Registry](../../user/packages/container_registry/index.md) ([Auto Build](stages.md#auto-build)).
- **Test** - GitLab runs various checks on the application:

  - The `test` job runs unit and integration tests by detecting the language and
    framework ([Auto Test](stages.md#auto-test))
  - The `code_quality` job checks the code quality and is allowed to fail
    ([Auto Code Quality](stages.md#auto-code-quality-starter)) **(STARTER)**
  - The `container_scanning` job checks the Docker container if it has any
    vulnerabilities and is allowed to fail ([Auto Container Scanning](stages.md#auto-container-scanning-ultimate))
  - The `dependency_scanning` job checks if the application has any dependencies
    susceptible to vulnerabilities and is allowed to fail
    ([Auto Dependency Scanning](stages.md#auto-dependency-scanning-ultimate)) **(ULTIMATE)**
  - The `sast` job runs static analysis on the current code to check for potential
    security issues and is allowed to fail ([Auto SAST](stages.md#auto-sast-ultimate)) **(ULTIMATE)**
  - The `license_management` job searches the application's dependencies to determine each of their
    licenses and is allowed to fail
    ([Auto License Compliance](stages.md#auto-license-compliance-ultimate)) **(ULTIMATE)**

  NOTE: **Note:**
  All jobs except `test` are allowed to fail in the test stage.

- **Production** - After the tests and checks finish, the application deploys in
  Kubernetes ([Auto Deploy](stages.md#auto-deploy)).
- **Performance** - Performance tests are run on the deployed application
  ([Auto Browser Performance Testing](stages.md#auto-browser-performance-testing-premium)). **(PREMIUM)**

After running a pipeline, you should view your deployed website and learn how
to monitor it.

### Monitor your project

After successfully deploying your application, you can view its website and check
on its health on the **Environments** page by navigating to
**{cloud-gear}** **Operations > Environments**. This page displays details about
the deployed applications, and the right-hand column displays icons that link
you to common environment tasks:
![Environments](img/guide_environments_v12_3.png)

- **{external-link}** **Open live environment** - Opens the URL of the application deployed in production
- **{chart}** **Monitoring** - Opens the metrics page where Prometheus collects data
  about the Kubernetes cluster and how the application
  affects it in terms of memory usage, CPU usage, and latency
- **{play}** **{angle-down}** **Deploy to** - Displays a list of environments you can deploy to
- **{terminal}** **Terminal** - Opens a [web terminal](../../ci/environments.md#web-terminals)
  session inside the container where the application is running
- **{repeat}** **Re-deploy to environment**
- **{stop}** **Stop environment**

GitLab displays the [Deploy Board](../../user/project/deploy_boards.md) below the
environment's information, with squares representing pods in your
Kubernetes cluster, color-coded to show their status. Hovering over a square on
the deploy board displays the state of the deployment, and clicking the square
takes you to the pod's logs page.
TIP: **Tip:**
The example shows only one pod hosting the application at the moment, but you can add
more pods by defining the [`REPLICAS` variable](customize.md#environment-variables)
in **{settings}** **Settings > CI/CD > Environment variables**.
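For instance, a minimal sketch of the equivalent setting kept in
`.gitlab-ci.yml` instead, assuming two pods are wanted:

```yaml
variables:
  REPLICAS: "2"
```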
### Work with branches

Following the [GitLab flow](../gitlab_flow.md#working-with-feature-branches),
you should next create a feature branch to add content to your application:

1. In your project's repository, navigate to the following file: `app/views/welcome/index.html.erb`.
   This file should only contain a paragraph: `<p>You're on Rails!</p>`.
1. Open the GitLab [Web IDE](../../user/project/web_ide/index.md) to make the change.
1. Edit the file so it contains:

   ```html
   <p>You're on Rails! Powered by GitLab Auto DevOps.</p>
   ```

1. Stage the file. Add a commit message, then create a new branch and a merge request
   by clicking **Commit**.
![Web IDE commit](img/guide_ide_commit_v12_3.png)

After submitting the merge request, GitLab runs your pipeline, and all the jobs
in it, as [described previously](#deploy-the-application), in addition to
a few more that run only on branches other than `master`.

![Merge request](img/guide_merge_request_v12_3.png)

After a few minutes you'll notice a test failed, which means a test was
'broken' by your change. Click on the failed `test` job to see more information
about it:
```plaintext
Failure:
...
Expected 0 to be >= 1.

bin/rails test test/controllers/welcome_controller_test.rb:4
```
To fix the broken test:

1. Return to the **Overview** page for your merge request, and click **Open in Web IDE**.
1. In the left-hand directory of files, find the `test/controllers/welcome_controller_test.rb`
   file, and click it to open it.
1. Change line 7 to say `You're on Rails! Powered by GitLab Auto DevOps.`
1. Click **Commit**.
1. In the left-hand column, under **Unstaged changes**, click the checkmark icon
   to stage the changes.
1. Write a commit message, and click **Commit**.
Return to the **Overview** page of your merge request, and you should not only
see the test passing, but also the application deployed as a
[review application](stages.md#auto-review-apps). You can visit it by clicking
the **View app** button to see your changes deployed.

![Review app](img/guide_merge_request_review_app_v12_3.png)

After merging the merge request, GitLab runs the pipeline on the `master` branch,
and then deploys the application to production.
## Conclusion

After implementing this project, you should have a solid understanding of the basics of Auto DevOps.
You started with building and testing, and moved on to deploying and monitoring an application,
all within GitLab. Despite its automatic nature, Auto DevOps can also be configured
and customized to fit your workflow. Here are some helpful resources for further reading:

...
@@ -58,7 +58,7 @@

under which this application will be deployed.

1. On the project's landing page, click **Add Kubernetes cluster**
   (note that this option is also available when you navigate to **Operations > Kubernetes**).

   ![Project landing page](../autodevops/img/guide_project_landing_page_v12_10.png)

1. On the **Create new cluster on GKE** tab, click **Sign in with Google**.

...
@@ -524,6 +524,12 @@

does not make any unsolicited requests including checking for updates.

The DAST job can emit various reports.

### List of URLs scanned

When DAST completes scanning, the merge request page states the number of URLs
scanned. Click **View details** to view the web console output, which includes
the list of scanned URLs.

![DAST Widget](img/dast_urls_scanned_v12_10.png)

### JSON

CAUTION: **Caution:**

...
@@ -251,21 +251,14 @@ module API
      end

      params do
        requires :id, type: Integer, desc: %q(Job's ID)
        requires :file, type: ::API::Validations::Types::WorkhorseFile, desc: %(The artifact file to store (generated by Multipart middleware))
        optional :token, type: String, desc: %q(Job's authentication token)
        optional :expire_in, type: String, desc: %q(Specify when artifacts should expire)
        optional :artifact_type, type: String, desc: %q(The type of artifact),
                 default: 'archive', values: Ci::JobArtifact.file_types.keys
        optional :artifact_format, type: String, desc: %q(The format of artifact),
                 default: 'zip', values: Ci::JobArtifact.file_formats.keys
        optional :metadata, type: ::API::Validations::Types::WorkhorseFile, desc: %(The artifact metadata to store (generated by Multipart middleware))
      end
      post '/:id/artifacts' do
        not_allowed! unless Gitlab.config.artifacts.enabled
@@ -274,10 +267,9 @@

        job = authenticate_job!
        forbidden!('Job is not running!') unless job.running?

        artifacts = params[:file]
        metadata = params[:metadata]

        file_too_large! unless artifacts.size < max_artifacts_size(job)

        result = Ci::CreateJobArtifactsService.new(job.project).execute(job, artifacts, params, metadata_file: metadata)

...
@@ -3,28 +3,11 @@

module Banzai
  module Filter
    class FrontMatterFilter < HTML::Pipeline::Filter
      def call
        lang_mapping = Gitlab::FrontMatter::DELIM_LANG

        html.sub(Gitlab::FrontMatter::PATTERN) do |_match|
          lang = $~[:lang].presence || lang_mapping[$~[:delim]]

          ["```#{lang}", $~[:front_matter], "```", "\n"].join("\n")
        end

...
# frozen_string_literal: true

module Gitlab
  module FrontMatter
    DELIM_LANG = {
      '---' => 'yaml',
      '+++' => 'toml',
      ';;;' => 'json'
    }.freeze

    DELIM = Regexp.union(DELIM_LANG.keys)

    PATTERN = %r{
      \A(?:[^\r\n]*coding:[^\r\n]*)? # optional encoding line
      \s*
      ^(?<delim>#{DELIM})[ \t]*(?<lang>\S*) # opening front matter marker (optional language specifier)
      \s*
      ^(?<front_matter>.*?) # front matter block content (not greedy)
      \s*
      ^(\k<delim> | \.{3}) # closing front matter marker
      \s*
    }mx.freeze
  end
end
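For illustration, a minimal sketch of a front matter block this pattern is meant
to match. The body is made up; note that the pattern also accepts `...` as a
closing marker for a `---` YAML block:

```yaml
---
title: An example page
tags:
  - one
  - two
...
```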
@@ -107,6 +107,7 @@ module Gitlab

        [
          ::FileUploader.root,
          Gitlab.config.uploads.storage_path,
          JobArtifactUploader.workhorse_upload_path,
          File.join(Rails.root, 'public/uploads/tmp')
        ]
      end

@@ -125,6 +126,8 @@

        Handler.new(env, message).with_open_files do
          @app.call(env)
        end
      rescue UploadedFile::InvalidPathError => e
        [400, { 'Content-Type' => 'text/plain' }, e.message]
      end
    end
  end
end

...
# frozen_string_literal: true

module Gitlab
  module WikiPages
    # Many common file systems have a limit of 255 bytes for file and
    # directory names, and while Git and GitLab both support paths exceeding
    # those limits, the presence of them makes it impossible for users on
    # those file systems to checkout a wiki repository locally.

    # To avoid this situation, we enforce these limits when editing pages
    # through the GitLab web interface and API:

    MAX_TITLE_BYTES = 245 # reserving 10 bytes for the file extension
    MAX_DIRECTORY_BYTES = 255
  end
end
# frozen_string_literal: true

module Gitlab
  module WikiPages
    class FrontMatterParser
      FEATURE_FLAG = :wiki_front_matter

      # We limit the maximum length of text we are prepared to parse as YAML, to
      # avoid exploitations and attempts to consume memory and CPU. We allow for:
      # - a title line
      # - a "slugs:" line
      # - and up to 50 slugs
      #
      # This limit does not take comments into account.
      MAX_SLUGS = 50
      SLUG_LINE_LENGTH = (4 + Gitlab::WikiPages::MAX_DIRECTORY_BYTES + 1 + Gitlab::WikiPages::MAX_TITLE_BYTES)
      MAX_FRONT_MATTER_LENGTH = (8 + Gitlab::WikiPages::MAX_TITLE_BYTES) + 7 + (SLUG_LINE_LENGTH * MAX_SLUGS)
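      # With MAX_DIRECTORY_BYTES = 255 and MAX_TITLE_BYTES = 245, this works out to
      # SLUG_LINE_LENGTH = 4 + 255 + 1 + 245 = 505 bytes, and
      # MAX_FRONT_MATTER_LENGTH = (8 + 245) + 7 + (505 * 50) = 25_510 bytes.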
      ParseError = Class.new(StandardError)

      class Result
        attr_reader :front_matter, :content, :reason, :error

        def initialize(content:, front_matter: {}, reason: nil, error: nil)
          @content = content
          @front_matter = front_matter.freeze
          @reason = reason
          @error = error
        end
      end

      # @param [String] wiki_content
      # @param [FeatureGate] feature_gate The scope for feature availability
      def initialize(wiki_content, feature_gate)
        @wiki_content = wiki_content
        @feature_gate = feature_gate
      end

      def self.enabled?(gate = nil)
        Feature.enabled?(FEATURE_FLAG, gate)
      end

      def parse
        return empty_result unless enabled? && wiki_content.present?
        return empty_result(block.error) unless block.valid?

        Result.new(front_matter: block.data, content: strip_front_matter_block)
      rescue ParseError => error
        empty_result(:parse_error, error)
      end

      class Block
        include Gitlab::Utils::StrongMemoize

        def initialize(delim = nil, lang = '', text = nil)
          @lang = lang.downcase.presence || Gitlab::FrontMatter::DELIM_LANG[delim]
          @text = text
        end

        def data
          @data ||= YAML.safe_load(text, symbolize_names: true)
        rescue Psych::DisallowedClass, Psych::SyntaxError => error
          raise ParseError, error.message
        end

        def valid?
          error.nil?
        end

        def error
          strong_memoize(:error) { no_match? || too_long? || not_yaml? || not_mapping? }
        end

        private

        attr_reader :lang, :text

        def no_match?
          :no_match if text.nil?
        end

        def not_yaml?
          :not_yaml if lang != 'yaml'
        end

        def too_long?
          :too_long if text.size > MAX_FRONT_MATTER_LENGTH
        end

        def not_mapping?
          :not_mapping unless data.is_a?(Hash)
        end
      end

      private

      attr_reader :wiki_content, :feature_gate

      def empty_result(reason = nil, error = nil)
        Result.new(content: wiki_content, reason: reason, error: error)
      end

      def enabled?
        self.class.enabled?(feature_gate)
      end

      def block
        @block ||= parse_front_matter_block
      end

      def parse_front_matter_block
        wiki_content.match(Gitlab::FrontMatter::PATTERN) { |m| Block.new(*m.captures) } || Block.new
      end

      def strip_front_matter_block
        wiki_content.gsub(Gitlab::FrontMatter::PATTERN, '')
      end
    end
  end
end
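For reference, a sketch of the kind of wiki front matter this parser is meant to
accept: a YAML mapping within the size limits above. The keys shown are the ones
the budget comments describe; the values are made up:

```yaml
---
title: How to use wiki front matter
slugs:
  - how-to-use-wiki-front-matter
  - an-old-page-name
---
```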
@@ -89,6 +89,17 @@ describe Gitlab::Middleware::Multipart do
    end
  end

  it 'allows files in the job artifact upload path' do
    with_tmp_dir('artifacts') do |dir, env|
      expect(JobArtifactUploader).to receive(:workhorse_upload_path).and_return(File.join(dir, 'artifacts'))
      expect(app).to receive(:call) do |env|
        expect(get_params(env)['file']).to be_a(::UploadedFile)
      end

      middleware.call(env)
    end
  end

  it 'allows symlinks for uploads dir' do
    Tempfile.open('two-levels') do |tempfile|
      symlinked_dir = '/some/dir/uploads'

...
# frozen_string_literal: true
require 'spec_helper'
describe Gitlab::WikiPages::FrontMatterParser do
subject(:parser) { described_class.new(raw_content, gate) }
let(:content) { 'This is the content' }
let(:end_divider) { '---' }
let(:gate) { double('Gate') }
let(:with_front_matter) do
<<~MD
---
a: 1
b: 2
c:
- foo
- bar
date: I am safe. Not actually a date
#{end_divider}
#{content}
MD
end
def have_correct_front_matter
include(a: 1, b: 2, c: %w(foo bar))
end
describe '#parse' do
subject { parser.parse }
context 'there is front matter' do
let(:raw_content) { with_front_matter }
it do
is_expected.to have_attributes(
front_matter: have_correct_front_matter,
content: content + "\n",
error: be_nil
)
end
end
context 'there is no content' do
let(:raw_content) { '' }
it do
is_expected.to have_attributes(
front_matter: {},
content: raw_content,
error: be_nil
)
end
end
context 'there is no front_matter' do
let(:raw_content) { content }
it { is_expected.to have_attributes(front_matter: be_empty, content: raw_content) }
it { is_expected.to have_attributes(reason: :no_match) }
end
context 'the feature flag is disabled' do
let(:raw_content) { with_front_matter }
before do
stub_feature_flags(Gitlab::WikiPages::FrontMatterParser::FEATURE_FLAG => false)
end
it { is_expected.to have_attributes(front_matter: be_empty, content: raw_content) }
end
context 'the feature flag is enabled for the gated object' do
let(:raw_content) { with_front_matter }
before do
stub_feature_flags(Gitlab::WikiPages::FrontMatterParser::FEATURE_FLAG => false)
stub_feature_flags(Gitlab::WikiPages::FrontMatterParser::FEATURE_FLAG => {
enabled: true,
thing: gate
})
end
it do
is_expected.to have_attributes(
front_matter: have_correct_front_matter,
content: content + "\n",
reason: be_nil
)
end
end
context 'the end divider is ...' do
let(:end_divider) { '...' }
let(:raw_content) { with_front_matter }
it { is_expected.to have_attributes(front_matter: have_correct_front_matter) }
end
context 'the front-matter is not a mapping' do
let(:raw_content) do
<<~MD
---
- thing one
- thing two
---
#{content}
MD
end
it { is_expected.to have_attributes(reason: :not_mapping) }
end
context 'there is nothing in the front-matter block' do
let(:raw_content) do
<<~MD
---
---
My content here
MD
end
it { is_expected.to have_attributes(reason: :not_mapping) }
end
context 'there is a string in the YAML block' do
let(:raw_content) do
<<~MD
---
This is a string
---
#{content}
MD
end
it { is_expected.to have_attributes(reason: :not_mapping) }
end
context 'there is dangerous YAML in the block' do
let(:raw_content) do
<<~MD
---
date: 2010-02-11 11:02:57
---
#{content}
MD
end
it { is_expected.to have_attributes(reason: :parse_error, error: be_present) }
end
context 'there is acceptably long YAML in the front-matter block' do
let(:raw_content) do
key = 'title: '
length = described_class::MAX_FRONT_MATTER_LENGTH - key.size
<<~MD
---
title: #{FFaker::Lorem.characters(length)}
---
#{content}
MD
end
it { is_expected.to have_attributes(front_matter: include(title: be_present)) }
end
context 'there is suspiciously long YAML in the front-matter block' do
let(:raw_content) do
<<~MD
---
title: #{FFaker::Lorem.characters(described_class::MAX_FRONT_MATTER_LENGTH)}
---
#{content}
MD
end
it { is_expected.to have_attributes(reason: :too_long) }
end
context 'TOML front matter' do
let(:raw_content) do
<<~MD
+++
title = "My title"
+++
#{content}
MD
end
it { is_expected.to have_attributes(reason: :not_yaml) }
end
context 'TOML style fences, advertised as YAML' do
let(:raw_content) do
<<~MD
+++ yaml
title: "My title"
+++
#{content}
MD
end
it { is_expected.to have_attributes(front_matter: include(title: 'My title')) }
end
context 'YAML, advertised as something else' do
let(:raw_content) do
<<~MD
--- toml
title: My title
---
#{content}
MD
end
it { is_expected.to have_attributes(reason: :not_yaml) }
end
context 'there is text content in the YAML block, in comments' do
let(:raw_content) do
<<~MD
---
# This is YAML
#
# It has comments though. Explaining things
foo: 1
## It has headings
headings:
- heading one
- heading two
# And lists
lists:
- and lists
- with things in them
---
#{content}
MD
end
it { is_expected.to have_attributes(front_matter: include(foo: 1)) }
end
context 'there is text content in the YAML block' do
let(:raw_content) do
<<~MD
---
# This is not YAML
In fact is looks like markdown
## It has headings
Paragraphs
- and lists
- with things in them
---
#{content}
MD
end
it { is_expected.to have_attributes(reason: :not_mapping) }
end
end
end
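Read together, these examples pin down the parser's result object: front_matter (a symbolized hash), content (with the block stripped), reason (nil on success, otherwise a symbol such as :no_match, :not_mapping, :not_yaml, :too_long or :parse_error) and error. A hedged usage sketch follows; the constructor arguments are inferred from the spec's use of a feature-flag gate, not checked against the implementation:

raw = <<~MD
  ---
  title: Foo
  ---
  My content
MD

# `project` stands in for the feature-flag gate ("thing") seen in the stubs above.
result = Gitlab::WikiPages::FrontMatterParser.new(raw, project).parse

result.front_matter # => { title: 'Foo' }
result.content      # => "My content\n" (front-matter block removed)
result.reason       # => nil; otherwise a symbol describing why parsing was skipped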
# frozen_string_literal: true
require 'spec_helper'
require Rails.root.join('db', 'post_migrate', '20200204113225_schedule_recalculate_project_authorizations_third_run.rb')
describe ScheduleRecalculateProjectAuthorizationsThirdRun do
let(:users_table) { table(:users) }
before do
stub_const("#{described_class}::BATCH_SIZE", 2)
1.upto(4) do |i|
users_table.create!(id: i, name: "user#{i}", email: "user#{i}@example.com", projects_limit: 1)
end
end
it 'schedules background migration' do
Sidekiq::Testing.fake! do
Timecop.freeze do
migrate!
expect(BackgroundMigrationWorker.jobs.size).to eq(2)
expect(described_class::MIGRATION).to be_scheduled_migration(1, 2)
expect(described_class::MIGRATION).to be_scheduled_migration(3, 4)
end
end
end
end
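The expectations above encode the batching arithmetic: four users with BATCH_SIZE stubbed to 2 yield two jobs covering id ranges (1, 2) and (3, 4). A standalone sketch of that slicing (the real migration presumably walks the users table in ranges; this shows only the arithmetic):

user_ids = [1, 2, 3, 4]
batch_size = 2

# Each slice becomes one scheduled background-migration job, identified
# by the first and last id of the range.
jobs = user_ids.each_slice(batch_size).map { |slice| [slice.first, slice.last] }
# => [[1, 2], [3, 4]]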
...
@@ -9,10 +9,8 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
     end
   end

-  let(:session) do
-    double(:session, { id: '6919a6f1bb119dd7396fadc38fd18d0d',
-                       '[]': {} })
-  end
+  let(:rack_session) { Rack::Session::SessionId.new('6919a6f1bb119dd7396fadc38fd18d0d') }
+  let(:session) { instance_double(ActionDispatch::Request::Session, id: rack_session, '[]': {}) }

   let(:request) do
     double(:request, {
...
@@ -25,13 +23,13 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
   describe '#current?' do
     it 'returns true if the active session matches the current session' do
-      active_session = ActiveSession.new(session_id: '6919a6f1bb119dd7396fadc38fd18d0d')
+      active_session = ActiveSession.new(session_id: rack_session)

       expect(active_session.current?(session)).to be true
     end

     it 'returns false if the active session does not match the current session' do
-      active_session = ActiveSession.new(session_id: '59822c7d9fcdfa03725eff41782ad97d')
+      active_session = ActiveSession.new(session_id: Rack::Session::SessionId.new('59822c7d9fcdfa03725eff41782ad97d'))

       expect(active_session.current?(session)).to be false
     end
...
@@ -46,14 +44,12 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
   describe '#public_id' do
     it 'returns an encrypted, url-encoded session id' do
-      original_session_id = "!*'();:@&\n=+$,/?%abcd#123[4567]8"
+      original_session_id = Rack::Session::SessionId.new("!*'();:@&\n=+$,/?%abcd#123[4567]8")
       active_session = ActiveSession.new(session_id: original_session_id)
-      encrypted_encoded_id = active_session.public_id
-
-      encrypted_id = CGI.unescape(encrypted_encoded_id)
+      encrypted_id = active_session.public_id

       derived_session_id = Gitlab::CryptoHelper.aes256_gcm_decrypt(encrypted_id)

-      expect(original_session_id).to eq derived_session_id
+      expect(original_session_id.public_id).to eq derived_session_id
     end
   end
...
@@ -104,7 +100,8 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
   describe '.list_sessions' do
     it 'uses the ActiveSession lookup to return original sessions' do
       Gitlab::Redis::SharedState.with do |redis|
-        redis.set("session:gitlab:6919a6f1bb119dd7396fadc38fd18d0d", Marshal.dump({ _csrf_token: 'abcd' }))
+        # Emulate redis-rack: https://github.com/redis-store/redis-rack/blob/c75f7f1a6016ee224e2615017fbfee964f23a837/lib/rack/session/redis.rb#L88
+        redis.set("session:gitlab:#{rack_session.private_id}", Marshal.dump({ _csrf_token: 'abcd' }))

         redis.sadd(
           "session:lookup:user:gitlab:#{user.id}",
...
@@ -127,17 +124,18 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
         redis.sadd("session:lookup:user:gitlab:#{user.id}", session_ids)
       end

-      expect(ActiveSession.session_ids_for_user(user.id)).to eq(session_ids)
+      expect(ActiveSession.session_ids_for_user(user.id).map(&:to_s)).to eq(session_ids)
     end
   end

   describe '.sessions_from_ids' do
     it 'uses the ActiveSession lookup to return original sessions' do
       Gitlab::Redis::SharedState.with do |redis|
-        redis.set("session:gitlab:6919a6f1bb119dd7396fadc38fd18d0d", Marshal.dump({ _csrf_token: 'abcd' }))
+        # Emulate redis-rack: https://github.com/redis-store/redis-rack/blob/c75f7f1a6016ee224e2615017fbfee964f23a837/lib/rack/session/redis.rb#L88
+        redis.set("session:gitlab:#{rack_session.private_id}", Marshal.dump({ _csrf_token: 'abcd' }))
       end

-      expect(ActiveSession.sessions_from_ids(['6919a6f1bb119dd7396fadc38fd18d0d'])).to eq [{ _csrf_token: 'abcd' }]
+      expect(ActiveSession.sessions_from_ids([rack_session])).to eq [{ _csrf_token: 'abcd' }]
     end

     it 'avoids a redis lookup for an empty array' do
...
@@ -152,11 +150,12 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
       redis = double(:redis)
       expect(Gitlab::Redis::SharedState).to receive(:with).and_yield(redis)

-      sessions = %w[session-a session-b]
+      sessions = %w[session-a session-b session-c session-d]
       mget_responses = sessions.map { |session| [Marshal.dump(session)] }
-      expect(redis).to receive(:mget).twice.and_return(*mget_responses)
+      expect(redis).to receive(:mget).exactly(4).times.and_return(*mget_responses)

-      expect(ActiveSession.sessions_from_ids([1, 2])).to eql(sessions)
+      session_ids = [1, 2].map { |id| Rack::Session::SessionId.new(id.to_s) }
+      expect(ActiveSession.sessions_from_ids(session_ids).map(&:to_s)).to eql(sessions)
     end
   end
...
@@ -212,6 +211,12 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
   end

   describe '.destroy' do
+    it 'gracefully handles a nil session ID' do
+      expect(described_class).not_to receive(:destroy_sessions)
+
+      ActiveSession.destroy(user, nil)
+    end
+
     it 'removes the entry associated with the currently killed user session' do
       Gitlab::Redis::SharedState.with do |redis|
         redis.set("session:user:gitlab:#{user.id}:6919a6f1bb119dd7396fadc38fd18d0d", '')
...
@@ -244,8 +249,9 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
     it 'removes the devise session' do
       Gitlab::Redis::SharedState.with do |redis|
-        redis.set("session:user:gitlab:#{user.id}:6919a6f1bb119dd7396fadc38fd18d0d", '')
-        redis.set("session:gitlab:6919a6f1bb119dd7396fadc38fd18d0d", '')
+        redis.set("session:user:gitlab:#{user.id}:#{rack_session.public_id}", '')
+        # Emulate redis-rack: https://github.com/redis-store/redis-rack/blob/c75f7f1a6016ee224e2615017fbfee964f23a837/lib/rack/session/redis.rb#L88
+        redis.set("session:gitlab:#{rack_session.private_id}", '')
       end

       ActiveSession.destroy(user, request.session.id)
...
@@ -322,7 +328,7 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
         (1..max_number_of_sessions_plus_two).each do |number|
           redis.set(
             "session:user:gitlab:#{user.id}:#{number}",
-            Marshal.dump(ActiveSession.new(session_id: "#{number}", updated_at: number.days.ago))
+            Marshal.dump(ActiveSession.new(session_id: number.to_s, updated_at: number.days.ago))
           )
           redis.sadd(
             "session:lookup:user:gitlab:#{user.id}",
...
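The rewritten spec leans on the session-ID hardening that arrived with rack 2.0.8: cookies carry a public ID, while the session store is keyed by a derived private ID, so a leaked store key no longer permits session hijacking. A sketch of the relationship (digest scheme as implemented in rack 2.0.9; worth verifying against the installed version):

# with rack >= 2.0.8 loaded
sid = Rack::Session::SessionId.new('6919a6f1bb119dd7396fadc38fd18d0d')

sid.public_id  # => "6919a6f1bb119dd7396fadc38fd18d0d" -- what the cookie sees
sid.private_id # => "2::#{Digest::SHA256.hexdigest(sid.public_id)}" -- what Redis is keyed by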
...
@@ -20,6 +20,17 @@ describe WikiPage do
   subject { new_page }

+  def disable_front_matter
+    stub_feature_flags(Gitlab::WikiPages::FrontMatterParser::FEATURE_FLAG => false)
+  end
+
+  def enable_front_matter_for_project
+    stub_feature_flags(Gitlab::WikiPages::FrontMatterParser::FEATURE_FLAG => {
+      thing: project,
+      enabled: true
+    })
+  end
+
   describe '.group_by_directory' do
     context 'when there are no pages' do
       it 'returns an empty array' do
...
@@ -101,6 +112,119 @@ describe WikiPage do
     end
   end

+  describe '#front_matter' do
+    let_it_be(:project) { create(:project) }
+    let(:wiki_page) { create(:wiki_page, project: project, content: content) }
+
+    shared_examples 'a page without front-matter' do
+      it { expect(wiki_page).to have_attributes(front_matter: {}, content: content) }
+    end
+
+    shared_examples 'a page with front-matter' do
+      let(:front_matter) { { title: 'Foo', slugs: %w[slug_a slug_b] } }
+
+      it { expect(wiki_page.front_matter).to eq(front_matter) }
+    end
+
+    context 'the wiki page has front matter' do
+      let(:content) do
+        <<~MD
+          ---
+          title: Foo
+          slugs:
+          - slug_a
+          - slug_b
+          ---
+          My actual content
+        MD
+      end
+
+      it_behaves_like 'a page with front-matter'
+
+      it 'strips the front matter from the content' do
+        expect(wiki_page.content.strip).to eq('My actual content')
+      end
+
+      context 'the feature flag is off' do
+        before do
+          disable_front_matter
+        end
+
+        it_behaves_like 'a page without front-matter'
+
+        context 'but enabled for the project' do
+          before do
+            enable_front_matter_for_project
+          end
+
+          it_behaves_like 'a page with front-matter'
+        end
+      end
+    end
+
+    context 'the wiki page does not have front matter' do
+      let(:content) { 'My actual content' }
+
+      it_behaves_like 'a page without front-matter'
+    end
+
+    context 'the wiki page has fenced blocks, but nothing in them' do
+      let(:content) do
+        <<~MD
+          ---
+          ---
+          My actual content
+        MD
+      end
+
+      it_behaves_like 'a page without front-matter'
+    end
+
+    context 'the wiki page has invalid YAML type in fenced blocks' do
+      let(:content) do
+        <<~MD
+          ---
+          this isn't YAML
+          ---
+          My actual content
+        MD
+      end
+
+      it_behaves_like 'a page without front-matter'
+    end
+
+    context 'the wiki page has a disallowed class in fenced block' do
+      let(:content) do
+        <<~MD
+          ---
+          date: 2010-02-11 11:02:57
+          ---
+          My actual content
+        MD
+      end
+
+      it_behaves_like 'a page without front-matter'
+    end
+
+    context 'the wiki page has invalid YAML in fenced block' do
+      let(:content) do
+        <<~MD
+          ---
+          invalid-use-of-reserved-indicator: @text
+          ---
+          My actual content
+        MD
+      end
+
+      it_behaves_like 'a page without front-matter'
+    end
+  end
+
   describe '.unhyphenize' do
     it 'removes hyphens from a name' do
       name = 'a-name--with-hyphens'
...
@@ -155,8 +279,8 @@ describe WikiPage do
   end

   describe '#validate_path_limits' do
-    let(:max_title) { described_class::MAX_TITLE_BYTES }
-    let(:max_directory) { described_class::MAX_DIRECTORY_BYTES }
+    let(:max_title) { Gitlab::WikiPages::MAX_TITLE_BYTES }
+    let(:max_directory) { Gitlab::WikiPages::MAX_DIRECTORY_BYTES }

     where(:character) do
       ['a', 'ä', '🙈']
...
@@ -296,7 +420,7 @@ describe WikiPage do
       subject.update(content: "new content")
       page = wiki.find_page(title)

-      expect(page.content).to eq('new content')
+      expect([subject.content, page.content]).to all(eq('new content'))
     end

     it "returns true" do
...
@@ -333,7 +457,7 @@ describe WikiPage do
       subject.update(content: new_content)
       page = wiki.find_page('test page')

-      expect(page.content).to eq("new content")
+      expect([subject.content, page.content]).to all(eq("new content"))
     end

     it "updates the title of the page" do
...
@@ -342,7 +466,75 @@ describe WikiPage do
       subject.update(title: new_title)
       page = wiki.find_page(new_title)

-      expect(page.title).to eq(new_title)
+      expect([subject.title, page.title]).to all(eq(new_title))
     end

+    describe 'updating front_matter' do
+      shared_examples 'able to update front-matter' do
+        it 'updates the wiki-page front-matter' do
+          title = subject.title
+          content = subject.content
+          subject.update(front_matter: { slugs: ['x'] })
+          page = wiki.find_page(title)
+
+          expect([subject, page]).to all(
+            have_attributes(
+              front_matter: include(slugs: include('x')),
+              content: content
+            ))
+        end
+      end
+
+      it_behaves_like 'able to update front-matter'
+
+      context 'the front matter is too long' do
+        let(:new_front_matter) do
+          {
+            title: generate(:wiki_page_title),
+            slugs: Array.new(51).map { FFaker::Lorem.characters(512) }
+          }
+        end
+
+        it 'raises an error' do
+          expect { subject.update(front_matter: new_front_matter) }.to raise_error(described_class::FrontMatterTooLong)
+        end
+      end
+
+      context 'the front-matter feature flag is not enabled' do
+        before do
+          disable_front_matter
+        end
+
+        it 'does not update the front-matter' do
+          content = subject.content
+          subject.update(front_matter: { slugs: ['x'] })
+
+          page = wiki.find_page(subject.title)
+
+          expect([subject, page]).to all(have_attributes(front_matter: be_empty, content: content))
+        end
+
+        context 'but it is enabled for the project' do
+          before do
+            enable_front_matter_for_project
+          end
+
+          it_behaves_like 'able to update front-matter'
+        end
+      end
+
+      it 'updates the wiki-page front-matter and content together' do
+        title = subject.title
+        content = 'totally new content'
+        subject.update(content: content, front_matter: { slugs: ['x'] })
+        page = wiki.find_page(title)
+
+        expect([subject, page]).to all(
+          have_attributes(
+            front_matter: include(slugs: include('x')),
+            content: content
+          ))
+      end
+    end
+
     it "returns true" do
...
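Taken together, the new examples assert that front matter survives a full round trip through WikiPage#update. A sketch of that flow, with names as used in the spec (the stored page presumably re-embeds the hash as a YAML block above the content, mirroring the parser specs earlier in this diff):

page.update(front_matter: { slugs: ['x'] }, content: 'totally new content')

reloaded = wiki.find_page(page.title)
reloaded.front_matter # => includes slugs: ['x']
reloaded.content      # => 'totally new content' (no YAML block in #content)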
...
@@ -1422,8 +1422,8 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
   describe 'artifacts' do
     let(:job) { create(:ci_build, :pending, user: user, project: project, pipeline: pipeline, runner_id: runner.id) }
-    let(:jwt_token) { JWT.encode({ 'iss' => 'gitlab-workhorse' }, Gitlab::Workhorse.secret, 'HS256') }
-    let(:headers) { { 'GitLab-Workhorse' => '1.0', Gitlab::Workhorse::INTERNAL_API_REQUEST_HEADER => jwt_token } }
+    let(:jwt) { JWT.encode({ 'iss' => 'gitlab-workhorse' }, Gitlab::Workhorse.secret, 'HS256') }
+    let(:headers) { { 'GitLab-Workhorse' => '1.0', Gitlab::Workhorse::INTERNAL_API_REQUEST_HEADER => jwt } }
     let(:headers_with_token) { headers.merge(API::Helpers::Runner::JOB_TOKEN_HEADER => job.token) }
     let(:file_upload) { fixture_file_upload('spec/fixtures/banana_sample.gif', 'image/gif') }
     let(:file_upload2) { fixture_file_upload('spec/fixtures/dk.png', 'image/gif') }
...
@@ -1703,12 +1703,12 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
       it 'fails to post artifacts without GitLab-Workhorse' do
         post api("/jobs/#{job.id}/artifacts"), params: { token: job.token }, headers: {}

-        expect(response).to have_gitlab_http_status(:forbidden)
+        expect(response).to have_gitlab_http_status(:bad_request)
       end
     end

     context 'Is missing GitLab Workhorse token headers' do
-      let(:jwt_token) { JWT.encode({ 'iss' => 'invalid-header' }, Gitlab::Workhorse.secret, 'HS256') }
+      let(:jwt) { JWT.encode({ 'iss' => 'invalid-header' }, Gitlab::Workhorse.secret, 'HS256') }

       it 'fails to post artifacts without GitLab-Workhorse' do
         expect(Gitlab::ErrorTracking).to receive(:track_exception).once
...
@@ -1722,15 +1722,14 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
       context 'when setting an expire date' do
         let(:default_artifacts_expire_in) {}
         let(:post_data) do
-          { 'file.path' => file_upload.path,
-            'file.name' => file_upload.original_filename,
-            'expire_in' => expire_in }
+          { file: file_upload,
+            expire_in: expire_in }
         end

         before do
           stub_application_setting(default_artifacts_expire_in: default_artifacts_expire_in)

-          post(api("/jobs/#{job.id}/artifacts"), params: post_data, headers: headers_with_token)
+          upload_artifacts(file_upload, headers_with_token, post_data)
         end

         context 'when an expire_in is given' do
...
@@ -1783,20 +1782,22 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
        let(:stored_artifacts_size) { job.reload.artifacts_size }
        let(:stored_artifacts_sha256) { job.reload.job_artifacts_archive.file_sha256 }
        let(:stored_metadata_sha256) { job.reload.job_artifacts_metadata.file_sha256 }
+       let(:file_keys) { post_data.keys }
+       let(:send_rewritten_field) { true }

        before do
-         post(api("/jobs/#{job.id}/artifacts"), params: post_data, headers: headers_with_token)
+         workhorse_finalize_with_multiple_files(
+           api("/jobs/#{job.id}/artifacts"),
+           method: :post,
+           file_keys: file_keys,
+           params: post_data,
+           headers: headers_with_token,
+           send_rewritten_field: send_rewritten_field
+         )
        end

        context 'when posts data accelerated by workhorse is correct' do
-         let(:post_data) do
-           { 'file.path' => artifacts.path,
-             'file.name' => artifacts.original_filename,
-             'file.sha256' => artifacts_sha256,
-             'metadata.path' => metadata.path,
-             'metadata.name' => metadata.original_filename,
-             'metadata.sha256' => metadata_sha256 }
-         end
+         let(:post_data) { { file: artifacts, metadata: metadata } }

          it 'stores artifacts and artifacts metadata' do
            expect(response).to have_gitlab_http_status(:created)
...
@@ -1808,9 +1809,30 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
          end
        end

+       context 'with a malicious file.path param' do
+         let(:post_data) { {} }
+         let(:tmp_file) { Tempfile.new('crafted.file.path') }
+         let(:url) { "/jobs/#{job.id}/artifacts?file.path=#{tmp_file.path}" }
+
+         it 'rejects the request' do
+           expect(response).to have_gitlab_http_status(:bad_request)
+           expect(stored_artifacts_size).to be_nil
+         end
+       end
+
+       context 'when workhorse header is missing' do
+         let(:post_data) { { file: artifacts, metadata: metadata } }
+         let(:send_rewritten_field) { false }
+
+         it 'rejects the request' do
+           expect(response).to have_gitlab_http_status(:bad_request)
+           expect(stored_artifacts_size).to be_nil
+         end
+       end
+
        context 'when there is no artifacts file in post data' do
          let(:post_data) do
-           { 'metadata' => metadata }
+           { metadata: metadata }
          end

          it 'is expected to respond with bad request' do
...
@@ -2053,7 +2075,8 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
          method: :post,
          file_key: :file,
          params: params.merge(file: file),
-         headers: headers
+         headers: headers,
+         send_rewritten_field: true
        )
      end
    end
...
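The new 'malicious file.path' and 'missing workhorse header' contexts pin down the security model this diff enforces: a `<key>.path` parameter is trusted only when workhorse itself vouched for that key in its signed rewritten-fields header, and anything else draws a :bad_request. A conceptual sketch of that rule — not the actual middleware code, just the invariant the specs assert:

# `rewritten_fields` comes from the JWT-signed header workhorse attaches;
# client-supplied "file.path" query or form params never enter this hash.
def trusted_path_param?(key, rewritten_fields)
  rewritten_fields.key?(key)
end

trusted_path_param?('file', {})                      # => false -- request rejected
trusted_path_param?('file', 'file' => '/tmp/upload') # => true  -- path accepted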
...
@@ -133,4 +133,55 @@ describe Groups::DestroyService do
       end
     end
   end
+
+  describe 'authorization updates', :sidekiq_inline do
+    context 'shared groups' do
+      let!(:shared_group) { create(:group, :private) }
+      let!(:shared_group_child) { create(:group, :private, parent: shared_group) }
+
+      let!(:project) { create(:project, group: shared_group) }
+      let!(:project_child) { create(:project, group: shared_group_child) }
+
+      before do
+        create(:group_group_link, shared_group: shared_group, shared_with_group: group)
+        group.refresh_members_authorized_projects
+      end
+
+      it 'updates project authorization' do
+        expect(user.can?(:read_project, project)).to eq(true)
+        expect(user.can?(:read_project, project_child)).to eq(true)
+
+        destroy_group(group, user, false)
+
+        expect(user.can?(:read_project, project)).to eq(false)
+        expect(user.can?(:read_project, project_child)).to eq(false)
+      end
+    end
+
+    context 'shared groups in the same group hierarchy' do
+      let!(:subgroup) { create(:group, :private, parent: group) }
+      let!(:subgroup_user) { create(:user) }
+
+      before do
+        subgroup.add_user(subgroup_user, Gitlab::Access::MAINTAINER)
+        create(:group_group_link, shared_group: group, shared_with_group: subgroup)
+        subgroup.refresh_members_authorized_projects
+      end
+
+      context 'group is deleted' do
+        it 'updates project authorization' do
+          expect { destroy_group(group, user, false) }.to(
+            change { subgroup_user.can?(:read_project, project) }.from(true).to(false))
+        end
+      end
+
+      context 'subgroup is deleted' do
+        it 'updates project authorization' do
+          expect { destroy_group(subgroup, user, false) }.to(
+            change { subgroup_user.can?(:read_project, project) }.from(true).to(false))
+        end
+      end
+    end
+  end
 end
...
@@ -33,22 +33,36 @@ module WorkhorseHelpers
   # workhorse_finalize will transform file_key inside params as if it was the finalize call of an inline object storage upload.
   # note that based on the content of the params it can simulate a disc acceleration or an object storage upload
   def workhorse_finalize(url, method: :post, file_key:, params:, headers: {}, send_rewritten_field: false)
-    workhorse_request_with_file(method, url,
-                                file_key: file_key,
-                                params: params,
-                                extra_headers: headers,
-                                send_rewritten_field: send_rewritten_field
+    workhorse_finalize_with_multiple_files(url, method: method, file_keys: file_key, params: params, headers: headers, send_rewritten_field: send_rewritten_field)
+  end
+
+  def workhorse_finalize_with_multiple_files(url, method: :post, file_keys:, params:, headers: {}, send_rewritten_field: false)
+    workhorse_request_with_multiple_files(method, url,
+                                          file_keys: file_keys,
+                                          params: params,
+                                          extra_headers: headers,
+                                          send_rewritten_field: send_rewritten_field
     )
   end

   def workhorse_request_with_file(method, url, file_key:, params:, env: {}, extra_headers: {}, send_rewritten_field:)
+    workhorse_request_with_multiple_files(method, url, file_keys: file_key, params: params, env: env, extra_headers: extra_headers, send_rewritten_field: send_rewritten_field)
+  end
+
+  def workhorse_request_with_multiple_files(method, url, file_keys:, params:, env: {}, extra_headers: {}, send_rewritten_field:)
     workhorse_params = params.dup
-    file = workhorse_params.delete(file_key)
-    workhorse_params = workhorse_disk_accelerated_file_params(file_key, file).merge(workhorse_params)
+
+    file_keys = Array(file_keys)
+    rewritten_fields = {}
+
+    file_keys.each do |key|
+      file = workhorse_params.delete(key)
+      rewritten_fields[key] = file.path if file
+      workhorse_params = workhorse_disk_accelerated_file_params(key, file).merge(workhorse_params)
+    end

     headers = if send_rewritten_field
-                workhorse_rewritten_fields_header(file_key => file.path)
+                workhorse_rewritten_fields_header(rewritten_fields)
               else
                 {}
               end
...
@@ -75,7 +89,11 @@ module WorkhorseHelpers
       "#{key}.name" => file.original_filename,
       "#{key}.size" => file.size
     }.tap do |params|
-      params["#{key}.path"] = file.path if file.path
+      if file.path
+        params["#{key}.path"] = file.path
+        params["#{key}.sha256"] = Digest::SHA256.file(file.path).hexdigest
+      end
+
       params["#{key}.remote_id"] = file.remote_id if file.respond_to?(:remote_id) && file.remote_id.present?
     end
   end
...
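For reference, the helper above turns a single uploaded fixture into the flattened param set workhorse would send after writing the file to disk, now including the sha256 added in this diff. The values below are illustrative:

file = fixture_file_upload('spec/fixtures/banana_sample.gif', 'image/gif')

workhorse_disk_accelerated_file_params(:file, file)
# => {
#      "file.name"   => "banana_sample.gif",
#      "file.size"   => file.size,
#      "file.path"   => file.path,
#      "file.sha256" => Digest::SHA256.file(file.path).hexdigest
#    }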
# frozen_string_literal: true
#
# This file pulls in the changes in https://github.com/rails/rails/pull/38063
# to fix controller specs under the latest Rack versions.
#
# This file should be removed after that change ships; the fix is not
# present in Rails 6.0.2.2.
module ActionController
class TestRequest < ActionDispatch::TestRequest #:nodoc:
def self.new_session
TestSessionPatched.new
end
end
# Methods #destroy and #load! are overridden to avoid calling methods on the
# @store object, which does not exist for the TestSession class.
class TestSessionPatched < Rack::Session::Abstract::PersistedSecure::SecureSessionHash #:nodoc:
DEFAULT_OPTIONS = Rack::Session::Abstract::Persisted::DEFAULT_OPTIONS
def initialize(session = {})
super(nil, nil)
@id = Rack::Session::SessionId.new(SecureRandom.hex(16))
@data = stringify_keys(session)
@loaded = true
end
def exists?
true
end
def keys
@data.keys
end
def values
@data.values
end
def destroy
clear
end
def fetch(key, *args, &block)
@data.fetch(key.to_s, *args, &block)
end
private
def load!
@id
end
end
end
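A quick sketch of what the patched class buys in a controller spec: the test session now behaves like Rack's SecureSessionHash without a backing store, so code that reads the session ID or destroys the session no longer fails. The expectations below follow from the methods defined above (illustrative, not an exhaustive contract):

session = ActionController::TestRequest.new_session

session['flash_message'] = 'hello'
session.fetch('flash_message') # => 'hello' (keys are stringified on the way in)
session.id                     # => a Rack::Session::SessionId with public_id/private_id
session.destroy                # just clears @data; there is no @store to call into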