Commit 490f1902 authored by GitLab Bot

Automatic merge of gitlab-org/gitlab master

parents 62d6e81c 61dbcf87
......@@ -113,7 +113,6 @@ linters:
- "app/views/help/instance_configuration.html.haml"
- "app/views/help/instance_configuration/_gitlab_ci.html.haml"
- "app/views/help/instance_configuration/_gitlab_pages.html.haml"
- "app/views/help/ui.html.haml"
- "app/views/import/bitbucket/status.html.haml"
- "app/views/import/bitbucket_server/status.html.haml"
- "app/views/invites/show.html.haml"
......
<script>
import { GlDrawer, GlLabel, GlAvatarLink, GlAvatarLabeled, GlLink } from '@gitlab/ui';
import { GlDrawer, GlLabel } from '@gitlab/ui';
import { mapActions, mapState } from 'vuex';
import { __ } from '~/locale';
import boardsStore from '~/boards/stores/boards_store';
......@@ -15,16 +15,13 @@ export default {
milestone: 'milestone',
label: 'label',
labelListText: __('Label'),
labelMilestoneText: __('Milestone'),
labelAssigneeText: __('Assignee'),
components: {
GlDrawer,
GlLabel,
GlAvatarLink,
GlAvatarLabeled,
GlLink,
BoardSettingsSidebarWipLimit: () =>
import('ee_component/boards/components/board_settings_wip_limit.vue'),
BoardSettingsListTypes: () =>
import('ee_component/boards/components/board_settings_list_types.vue'),
},
computed: {
...mapState(['activeId']),
......@@ -41,30 +38,11 @@ export default {
activeListLabel() {
return this.activeList.label;
},
activeListMilestone() {
return this.activeList.milestone;
},
activeListAssignee() {
return this.activeList.assignee;
},
boardListType() {
return this.activeList.type || null;
},
listTypeTitle() {
switch (this.boardListType) {
case this.$options.milestone: {
return this.$options.labelMilestoneText;
}
case this.$options.label: {
return this.$options.labelListText;
}
case this.$options.assignee: {
return this.$options.labelAssigneeText;
}
default: {
return '';
}
}
return this.$options.labelListText;
},
},
created() {
......@@ -94,32 +72,20 @@ export default {
>
<template #header>{{ $options.listSettingsText }}</template>
<template v-if="isSidebarOpen">
<div class="d-flex flex-column align-items-start">
<label class="js-list-label">{{ listTypeTitle }}</label>
<template v-if="boardListType === $options.label">
<gl-label
:title="activeListLabel.title"
:background-color="activeListLabel.color"
:scoped="showScopedLabels(activeListLabel)"
/>
</template>
<template v-else-if="boardListType === $options.assignee">
<gl-avatar-link class="js-assignee" :href="activeListAssignee.webUrl">
<gl-avatar-labeled
:size="32"
:label="activeListAssignee.name"
:sub-label="`@${activeListAssignee.username}`"
:src="activeListAssignee.avatar"
/>
</gl-avatar-link>
</template>
<template v-else-if="boardListType === $options.milestone">
<gl-link class="js-milestone" :href="activeListMilestone.webUrl">
{{ activeListMilestone.title }}
</gl-link>
</template>
<div v-if="boardListType === $options.label">
<label class="js-list-label gl-display-block">{{ listTypeTitle }}</label>
<gl-label
:title="activeListLabel.title"
:background-color="activeListLabel.color"
:scoped="showScopedLabels(activeListLabel)"
/>
</div>
<board-settings-list-types
v-else
:active-list="activeList"
:board-list-type="boardListType"
/>
<board-settings-sidebar-wip-limit :max-issue-count="activeList.maxIssueCount" />
</template>
</gl-drawer>
......
......@@ -72,18 +72,11 @@ export default {
GlPagination,
GlTabs,
GlTab,
PublishedCell: () => import('ee_component/incidents/components/published_cell.vue'),
},
directives: {
GlTooltip: GlTooltipDirective,
},
inject: [
'projectPath',
'newIssuePath',
'incidentTemplateName',
'issuePath',
'publishedAvailable',
],
inject: ['projectPath', 'newIssuePath', 'incidentTemplateName', 'issuePath'],
apollo: {
incidents: {
query: getIncidents,
......@@ -151,20 +144,6 @@ export default {
newIncidentPath() {
return mergeUrlParams({ issuable_template: this.incidentTemplateName }, this.newIssuePath);
},
availableFields() {
return this.publishedAvailable
? [
...this.$options.fields,
...[
{
key: 'published',
label: s__('IncidentManagement|Published'),
thClass: 'gl-pointer-events-none',
},
],
]
: this.$options.fields;
},
},
methods: {
onInputChange: debounce(function debounceSearch(input) {
......@@ -251,7 +230,7 @@ export default {
</h4>
<gl-table
:items="incidents.list || []"
:fields="availableFields"
:fields="$options.fields"
:show-empty="true"
:busy="loading"
stacked="md"
......@@ -266,7 +245,7 @@ export default {
<gl-icon
v-if="item.state === 'closed'"
name="issue-close"
class="gl-ml-1 gl-fill-blue-500"
class="gl-fill-blue-500"
data-testid="incident-closed"
/>
</div>
......@@ -306,13 +285,6 @@ export default {
</div>
</template>
<template v-if="publishedAvailable" #cell(published)="{ item }">
<published-cell
:status-page-published-incident="item.statusPagePublishedIncident"
:un-published="$options.i18n.unPublished"
/>
</template>
<template #table-busy>
<gl-loading-icon size="lg" color="dark" class="mt-3" />
</template>
......
......@@ -6,7 +6,6 @@ export const I18N = {
unassigned: s__('IncidentManagement|Unassigned'),
createIncidentBtnLabel: s__('IncidentManagement|Create incident'),
searchPlaceholder: __('Search or filter results...'),
unPublished: s__('IncidentManagement|Unpublished'),
};
export const INCIDENT_STATE_TABS = [
......
......@@ -37,7 +37,6 @@ query getIncidents(
webUrl
}
}
statusPagePublishedIncident
}
pageInfo {
hasNextPage
......
......@@ -8,13 +8,7 @@ export default () => {
const selector = '#js-incidents';
const domEl = document.querySelector(selector);
const {
projectPath,
newIssuePath,
incidentTemplateName,
issuePath,
publishedAvailable,
} = domEl.dataset;
const { projectPath, newIssuePath, incidentTemplateName, issuePath } = domEl.dataset;
const apolloProvider = new VueApollo({
defaultClient: createDefaultClient(),
......@@ -27,7 +21,6 @@ export default () => {
incidentTemplateName,
newIssuePath,
issuePath,
publishedAvailable,
},
apolloProvider,
components: {
......
......@@ -98,9 +98,4 @@
@include gl-w-full;
}
}
// TODO: Abstract to `@gitlab/ui` utility set: https://gitlab.com/gitlab-org/gitlab-ui/-/issues/921
.gl-fill-blue-500 {
fill: $blue-500;
}
}
......@@ -97,9 +97,6 @@ module Types
field :design_collection, Types::DesignManagement::DesignCollectionType, null: true,
description: 'Collection of design images associated with this issue'
field :status_page_published_incident, GraphQL::BOOLEAN_TYPE, null: true,
description: 'Indicates whether an issue is published to the status page'
end
end
......
......@@ -9,7 +9,7 @@ module MirrorHelper
end
def mirror_lfs_sync_message
_('The Git LFS objects will <strong>not</strong> be synced.').html_safe
html_escape(_('The Git LFS objects will %{strong_open}not%{strong_close} be synced.')) % { strong_open: '<strong>'.html_safe, strong_close: '</strong>'.html_safe }
end
end
......
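The `html_safe` removals in this commit (here and in the view changes below) follow a single pattern: escape the translated format string first, then interpolate only trusted markup fragments that are explicitly marked `html_safe`. A minimal, self-contained sketch of that idea, assuming ActiveSupport is available; the strings are illustrative, not the real translations:

```ruby
require 'active_support/core_ext/string/output_safety'
include ERB::Util # html_escape, patched by ActiveSupport to return an html_safe SafeBuffer

# The escaped format string is a SafeBuffer, so `%` escapes every interpolated
# value automatically; only fragments explicitly marked `.html_safe` pass through.
message = html_escape('The Git LFS objects will %{strong_open}not%{strong_close} be synced.') % {
  strong_open: '<strong>'.html_safe,
  strong_close: '</strong>'.html_safe
}
puts message
# => The Git LFS objects will <strong>not</strong> be synced.

# A user-controlled value, by contrast, stays escaped instead of being injected:
puts html_escape('Hello %{name}') % { name: '<script>alert(1)</script>' }
# => Hello &lt;script&gt;alert(1)&lt;/script&gt;
```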
......@@ -71,7 +71,7 @@ module PreferencesHelper
def language_choices
options_for_select(
Gitlab::I18n::AVAILABLE_LANGUAGES.map(&:reverse).sort,
Gitlab::I18n.selectable_locales.map(&:reverse).sort,
current_user.preferred_language
)
end
......
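For reference, `options_for_select` expects `[label, value]` pairs, which is exactly what `.map(&:reverse).sort` builds from the locale hash; a tiny sketch with made-up data standing in for `Gitlab::I18n.selectable_locales`:

```ruby
# Made-up data: the real hash maps locale codes to display names.
locales = { 'es' => 'Spanish', 'en' => 'English', 'de' => 'German' }

# Reverse each [code, name] pair into [name, code] and sort by display name.
locales.map(&:reverse).sort
# => [["English", "en"], ["German", "de"], ["Spanish", "es"]]
```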
......@@ -10,5 +10,3 @@ module Projects::IncidentsHelper
}
end
end
Projects::IncidentsHelper.prepend_if_ee('EE::Projects::IncidentsHelper')
......@@ -103,15 +103,15 @@ module Ci
after_create :keep_around_commits, unless: :importing?
# We use `Enums::Ci::Pipeline.sources` here so that EE can more easily extend
# We use `Ci::PipelineEnums.sources` here so that EE can more easily extend
# this `Hash` with new values.
enum_with_nil source: Enums::Ci::Pipeline.sources
enum_with_nil source: ::Ci::PipelineEnums.sources
enum_with_nil config_source: Enums::Ci::Pipeline.config_sources
enum_with_nil config_source: ::Ci::PipelineEnums.config_sources
# We use `Enums::Ci::Pipeline.failure_reasons` here so that EE can more easily
# We use `Ci::PipelineEnums.failure_reasons` here so that EE can more easily
# extend this `Hash` with new values.
enum failure_reason: Enums::Ci::Pipeline.failure_reasons
enum failure_reason: ::Ci::PipelineEnums.failure_reasons
enum locked: { unlocked: 0, artifacts_locked: 1 }
......@@ -253,7 +253,7 @@ module Ci
scope :internal, -> { where(source: internal_sources) }
scope :no_child, -> { where.not(source: :parent_pipeline) }
scope :ci_sources, -> { where(config_source: Enums::Ci::Pipeline.ci_config_sources_values) }
scope :ci_sources, -> { where(config_source: ::Ci::PipelineEnums.ci_config_sources_values) }
scope :for_user, -> (user) { where(user: user) }
scope :for_sha, -> (sha) { where(sha: sha) }
scope :for_source_sha, -> (source_sha) { where(source_sha: source_sha) }
......@@ -1058,7 +1058,7 @@ module Ci
end
def cacheable?
Enums::Ci::Pipeline.ci_config_sources.key?(config_source.to_sym)
Ci::PipelineEnums.ci_config_sources.key?(config_source.to_sym)
end
def source_ref_path
......
# frozen_string_literal: true
module Ci
module PipelineEnums
# Returns the `Hash` to use for creating the `failure_reason` enum for
# `Ci::Pipeline`.
def self.failure_reasons
{
unknown_failure: 0,
config_error: 1,
external_validation_failure: 2
}
end
# Returns the `Hash` to use for creating the `sources` enum for
# `Ci::Pipeline`.
def self.sources
{
unknown: nil,
push: 1,
web: 2,
trigger: 3,
schedule: 4,
api: 5,
external: 6,
# TODO: Rename `pipeline` to `cross_project_pipeline` in 13.0
# https://gitlab.com/gitlab-org/gitlab/issues/195991
pipeline: 7,
chat: 8,
webide: 9,
merge_request_event: 10,
external_pull_request_event: 11,
parent_pipeline: 12,
ondemand_dast_scan: 13
}
end
# Returns the `Hash` to use for creating the `config_sources` enum for
# `Ci::Pipeline`.
def self.config_sources
{
unknown_source: nil,
repository_source: 1,
auto_devops_source: 2,
webide_source: 3,
remote_source: 4,
external_project_source: 5,
bridge_source: 6,
parameter_source: 7
}
end
def self.ci_config_sources
config_sources.slice(
:unknown_source,
:repository_source,
:auto_devops_source,
:remote_source,
:external_project_source
)
end
def self.ci_config_sources_values
ci_config_sources.values
end
def self.non_ci_config_source_values
config_sources.values - ci_config_sources.values
end
end
end
Ci::PipelineEnums.prepend_if_ee('EE::Ci::PipelineEnums')
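The `prepend_if_ee` call above is what the "so that EE can more easily extend this `Hash`" comments refer to. A plain-Ruby sketch of that mechanism, with illustrative module names and values rather than the actual EE code:

```ruby
# Trimmed-down stand-in for the CE enum module above.
module CE
  module PipelineEnums
    def self.sources
      { unknown: nil, push: 1, web: 2 }
    end
  end
end

# Hypothetical EE extension: extend the CE hash rather than redefining it, so
# CE keys keep their database values and EE only appends new, non-clashing ones.
module EE
  module PipelineEnums
    def sources
      super.merge(some_ee_only_source: 100)
    end
  end
end

CE::PipelineEnums.singleton_class.prepend(EE::PipelineEnums)

CE::PipelineEnums.sources
# => { unknown: nil, push: 1, web: 2, some_ee_only_source: 100 }
```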
......@@ -77,9 +77,9 @@ class CommitStatus < ApplicationRecord
merge(or_conditions)
end
# We use `Enums::CommitStatus.failure_reasons` here so that EE can more easily
# We use `CommitStatusEnums.failure_reasons` here so that EE can more easily
# extend this `Hash` with new values.
enum_with_nil failure_reason: Enums::CommitStatus.failure_reasons
enum_with_nil failure_reason: ::CommitStatusEnums.failure_reasons
##
# We still create some CommitStatuses outside of CreatePipelineService.
......
# frozen_string_literal: true
module Enums
module Ci
module Pipeline
# Returns the `Hash` to use for creating the `failure_reason` enum for
# `Ci::Pipeline`.
def self.failure_reasons
{
unknown_failure: 0,
config_error: 1,
external_validation_failure: 2
}
end
# Returns the `Hash` to use for creating the `sources` enum for
# `Ci::Pipeline`.
def self.sources
{
unknown: nil,
push: 1,
web: 2,
trigger: 3,
schedule: 4,
api: 5,
external: 6,
# TODO: Rename `pipeline` to `cross_project_pipeline` in 13.0
# https://gitlab.com/gitlab-org/gitlab/issues/195991
pipeline: 7,
chat: 8,
webide: 9,
merge_request_event: 10,
external_pull_request_event: 11,
parent_pipeline: 12,
ondemand_dast_scan: 13
}
end
# Returns the `Hash` to use for creating the `config_sources` enum for
# `Ci::Pipeline`.
def self.config_sources
{
unknown_source: nil,
repository_source: 1,
auto_devops_source: 2,
webide_source: 3,
remote_source: 4,
external_project_source: 5,
bridge_source: 6,
parameter_source: 7
}
end
def self.ci_config_sources
config_sources.slice(
:unknown_source,
:repository_source,
:auto_devops_source,
:remote_source,
:external_project_source
)
end
def self.ci_config_sources_values
ci_config_sources.values
end
def self.non_ci_config_source_values
config_sources.values - ci_config_sources.values
end
end
end
end
Enums::Ci::Pipeline.prepend_if_ee('EE::Enums::Ci::Pipeline')
# frozen_string_literal: true
module Enums
module CommitStatus
# Returns the Hash to use for creating the `failure_reason` enum for
# `CommitStatus`.
def self.failure_reasons
{
unknown_failure: nil,
script_failure: 1,
api_failure: 2,
stuck_or_timeout_failure: 3,
runner_system_failure: 4,
missing_dependency_failure: 5,
runner_unsupported: 6,
stale_schedule: 7,
job_execution_timeout: 8,
archived_failure: 9,
unmet_prerequisites: 10,
scheduler_failure: 11,
data_integrity_failure: 12,
forward_deployment_failure: 13,
protected_environment_failure: 1_000,
insufficient_bridge_permissions: 1_001,
downstream_bridge_project_not_found: 1_002,
invalid_bridge_trigger: 1_003,
upstream_bridge_project_not_found: 1_004,
insufficient_upstream_permissions: 1_005,
bridge_pipeline_is_child_pipeline: 1_006,
downstream_pipeline_creation_failed: 1_007
}
end
end
end
# frozen_string_literal: true
module Enums
module PrometheusMetric
def self.groups
{
# built-in groups
nginx_ingress_vts: -1,
ha_proxy: -2,
aws_elb: -3,
nginx: -4,
kubernetes: -5,
nginx_ingress: -6,
cluster_health: -100
}.merge(custom_groups).freeze
end
# custom/user groups
def self.custom_groups
{
business: 0,
response: 1,
system: 2
}.freeze
end
def self.group_details
{
# built-in groups
nginx_ingress_vts: {
group_title: _('Response metrics (NGINX Ingress VTS)'),
required_metrics: %w(nginx_upstream_responses_total nginx_upstream_response_msecs_avg),
priority: 10
}.freeze,
nginx_ingress: {
group_title: _('Response metrics (NGINX Ingress)'),
required_metrics: %w(nginx_ingress_controller_requests nginx_ingress_controller_ingress_upstream_latency_seconds_sum),
priority: 10
}.freeze,
ha_proxy: {
group_title: _('Response metrics (HA Proxy)'),
required_metrics: %w(haproxy_frontend_http_requests_total haproxy_frontend_http_responses_total),
priority: 10
}.freeze,
aws_elb: {
group_title: _('Response metrics (AWS ELB)'),
required_metrics: %w(aws_elb_request_count_sum aws_elb_latency_average aws_elb_httpcode_backend_5_xx_sum),
priority: 10
}.freeze,
nginx: {
group_title: _('Response metrics (NGINX)'),
required_metrics: %w(nginx_server_requests nginx_server_requestMsec),
priority: 10
}.freeze,
kubernetes: {
group_title: _('System metrics (Kubernetes)'),
required_metrics: %w(container_memory_usage_bytes container_cpu_usage_seconds_total),
priority: 5
}.freeze,
cluster_health: {
group_title: _('Cluster Health'),
required_metrics: %w(container_memory_usage_bytes container_cpu_usage_seconds_total),
priority: 10
}.freeze
}.merge(custom_group_details).freeze
end
# custom/user groups
def self.custom_group_details
{
business: {
group_title: _('Business metrics (Custom)'),
priority: 0
}.freeze,
response: {
group_title: _('Response metrics (Custom)'),
priority: -5
}.freeze,
system: {
group_title: _('System metrics (Custom)'),
priority: -10
}.freeze
}.freeze
end
end
end
# frozen_string_literal: true
module Enums
module UserCallout
# Returns the `Hash` to use for the `feature_name` enum in the `UserCallout`
# model.
#
# This method is separate from the `UserCallout` model so that it can be
# extended by EE.
#
# If you are going to add new items to this hash, check that you're not going
# to conflict with EE-only values: https://gitlab.com/gitlab-org/gitlab/blob/master/ee/app/models/concerns/ee/enums/user_callout.rb
def self.feature_names
{
gke_cluster_integration: 1,
gcp_signup_offer: 2,
cluster_security_warning: 3,
suggest_popover_dismissed: 9,
tabs_position_highlight: 10,
webhooks_moved: 13,
admin_integrations_moved: 15,
personal_access_token_expiry: 21 # EE-only
}
end
end
end
Enums::UserCallout.prepend_if_ee('EE::Enums::UserCallout')
......@@ -21,7 +21,7 @@ class InternalId < ApplicationRecord
belongs_to :project
belongs_to :namespace
enum usage: Enums::InternalId.usage_resources
enum usage: ::InternalIdEnums.usage_resources
validates :usage, presence: true
......
# frozen_string_literal: true
module Enums
module InternalId
def self.usage_resources
# when adding new resource, make sure it doesn't conflict with EE usage_resources
{
module InternalIdEnums
def self.usage_resources
# when adding new resource, make sure it doesn't conflict with EE usage_resources
{
issues: 0,
merge_requests: 1,
deployments: 2,
......@@ -15,9 +14,8 @@ module Enums
operations_user_lists: 7,
alert_management_alerts: 8,
sprints: 9 # iterations
}
end
}
end
end
Enums::InternalId.prepend_if_ee('EE::Enums::InternalId')
InternalIdEnums.prepend_if_ee('EE::InternalIdEnums')
......@@ -4,7 +4,7 @@ class PrometheusMetric < ApplicationRecord
belongs_to :project, validate: true, inverse_of: :prometheus_metrics
has_many :prometheus_alerts, inverse_of: :prometheus_metric
enum group: Enums::PrometheusMetric.groups
enum group: PrometheusMetricEnums.groups
validates :title, presence: true
validates :query, presence: true
......@@ -72,6 +72,6 @@ class PrometheusMetric < ApplicationRecord
private
def group_details(group)
Enums::PrometheusMetric.group_details.fetch(group.to_sym)
PrometheusMetricEnums.group_details.fetch(group.to_sym)
end
end
# frozen_string_literal: true
module PrometheusMetricEnums
def self.groups
{
# built-in groups
nginx_ingress_vts: -1,
ha_proxy: -2,
aws_elb: -3,
nginx: -4,
kubernetes: -5,
nginx_ingress: -6,
cluster_health: -100
}.merge(custom_groups).freeze
end
# custom/user groups
def self.custom_groups
{
business: 0,
response: 1,
system: 2
}.freeze
end
def self.group_details
{
# built-in groups
nginx_ingress_vts: {
group_title: _('Response metrics (NGINX Ingress VTS)'),
required_metrics: %w(nginx_upstream_responses_total nginx_upstream_response_msecs_avg),
priority: 10
}.freeze,
nginx_ingress: {
group_title: _('Response metrics (NGINX Ingress)'),
required_metrics: %w(nginx_ingress_controller_requests nginx_ingress_controller_ingress_upstream_latency_seconds_sum),
priority: 10
}.freeze,
ha_proxy: {
group_title: _('Response metrics (HA Proxy)'),
required_metrics: %w(haproxy_frontend_http_requests_total haproxy_frontend_http_responses_total),
priority: 10
}.freeze,
aws_elb: {
group_title: _('Response metrics (AWS ELB)'),
required_metrics: %w(aws_elb_request_count_sum aws_elb_latency_average aws_elb_httpcode_backend_5_xx_sum),
priority: 10
}.freeze,
nginx: {
group_title: _('Response metrics (NGINX)'),
required_metrics: %w(nginx_server_requests nginx_server_requestMsec),
priority: 10
}.freeze,
kubernetes: {
group_title: _('System metrics (Kubernetes)'),
required_metrics: %w(container_memory_usage_bytes container_cpu_usage_seconds_total),
priority: 5
}.freeze,
cluster_health: {
group_title: _('Cluster Health'),
required_metrics: %w(container_memory_usage_bytes container_cpu_usage_seconds_total),
priority: 10
}.freeze
}.merge(custom_group_details).freeze
end
# custom/user groups
def self.custom_group_details
{
business: {
group_title: _('Business metrics (Custom)'),
priority: 0
}.freeze,
response: {
group_title: _('Response metrics (Custom)'),
priority: -5
}.freeze,
system: {
group_title: _('System metrics (Custom)'),
priority: -10
}.freeze
}.freeze
end
end
......@@ -3,9 +3,9 @@
class UserCallout < ApplicationRecord
belongs_to :user
# We use `Enums::UserCallout.feature_names` here so that EE can more easily
# We use `UserCalloutEnums.feature_names` here so that EE can more easily
# extend this `Hash` with new values.
enum feature_name: Enums::UserCallout.feature_names
enum feature_name: ::UserCalloutEnums.feature_names
validates :user, presence: true
validates :feature_name,
......
# frozen_string_literal: true
module UserCalloutEnums
# Returns the `Hash` to use for the `feature_name` enum in the `UserCallout`
# model.
#
# This method is separate from the `UserCallout` model so that it can be
# extended by EE.
#
# If you are going to add new items to this hash, check that you're not going
# to conflict with EE-only values: https://gitlab.com/gitlab-org/gitlab/blob/master/ee/app/models/ee/user_callout_enums.rb
def self.feature_names
{
gke_cluster_integration: 1,
gcp_signup_offer: 2,
cluster_security_warning: 3,
suggest_popover_dismissed: 9,
tabs_position_highlight: 10,
webhooks_moved: 13,
admin_integrations_moved: 15,
personal_access_token_expiry: 21 # EE-only
}
end
end
UserCalloutEnums.prepend_if_ee('EE::UserCalloutEnums')
......@@ -478,6 +478,8 @@ class ProjectPolicy < BasePolicy
enable :read_note
enable :read_pipeline
enable :read_pipeline_schedule
enable :read_environment
enable :read_deployment
enable :read_commit_status
enable :read_container_image
enable :download_code
......
......@@ -147,7 +147,7 @@ class BuildDetailsEntity < JobEntity
end
def help_message(docs_url)
_("Please refer to <a href=\"%{docs_url}\">%{docs_url}</a>") % { docs_url: docs_url }
html_escape(_("Please refer to %{docs_url}")) % { docs_url: "<a href=\"#{docs_url}\">#{html_escape(docs_url)}</a>".html_safe }
end
end
......
......@@ -31,7 +31,7 @@ module Metrics
# A group title is valid if it is one of the limited
# options the user can select in the UI.
def valid_group_title?(group)
Enums::PrometheusMetric
PrometheusMetricEnums
.custom_group_details
.map { |_, details| details[:group_title] }
.include?(group)
......@@ -100,12 +100,12 @@ module Metrics
# Returns a symbol representing the group that
# the dashboard's group title belongs to.
# It will be one of the keys found under
# Enums::PrometheusMetric.custom_groups.
# PrometheusMetricEnums.custom_groups.
#
# @return [String]
def group_key
strong_memoize(:group_key) do
Enums::PrometheusMetric
PrometheusMetricEnums
.group_details
.find { |_, details| details[:group_title] == group }
.first
......
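As context for the `group_key` lookup above: `group_details` maps a symbolic key to a hash that includes the displayed `group_title`, so finding by title and taking `.first` of the matching pair yields the symbol. A tiny sketch with trimmed-down data from the enum modules earlier in this diff:

```ruby
group_details = {
  business: { group_title: 'Business metrics (Custom)' },
  response: { group_title: 'Response metrics (Custom)' },
  system:   { group_title: 'System metrics (Custom)' }
}

# Hash#find yields [key, details] pairs; `.first` of the match is the key.
group_details.find { |_, details| details[:group_title] == 'Response metrics (Custom)' }.first
# => :response
```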
= _('Environment variables are applied to environments via the runner. They can be protected by only exposing them to protected branches or tags. Additionally, they can be masked so they are hidden in job logs, though they must match certain regexp requirements to do so. You can use environment variables for passwords, secret keys, or whatever you want.')
= _('You may also add variables that are made available to the running application by prepending the variable key with <code>K8S_SECRET_</code>.').html_safe
= html_escape(_('You may also add variables that are made available to the running application by prepending the variable key with %{k8s_secret}.')) % { k8s_secret: tag.code('K8S_SECRET_') }
= link_to _('More information'), help_page_path('ci/variables/README', anchor: 'custom-environment-variables')
......@@ -11,7 +11,7 @@
= @cluster.provider_label
%p
- provider_link = link_to(@cluster.provider_label, @cluster.provider_management_url, target: '_blank', rel: 'noopener noreferrer')
= s_('ClusterIntegration|Manage your Kubernetes cluster by visiting %{provider_link}').html_safe % { provider_link: provider_link }
= html_escape(s_('ClusterIntegration|Manage your Kubernetes cluster by visiting %{provider_link}')) % { provider_link: provider_link }
.sub-section.form-group
= form_for @cluster, url: clusterable.cluster_path(@cluster), as: :cluster, html: { class: 'cluster_management_form' } do |field|
......@@ -22,7 +22,7 @@
= project_select_tag('cluster[management_project_id]', class: 'hidden-filter-value', toggle_class: 'js-project-search js-project-filter js-filter-submit', dropdown_class: 'dropdown-menu-selectable dropdown-menu-project js-filter-submit',
placeholder: _('Select project'), idAttribute: 'id', data: { order_by: 'last_activity_at', idattribute: 'id', simple_filter: true, allow_clear: true, include_groups: false, include_projects_in_subgroups: true, group_id: group_id, user_id: user_id }, value: @cluster.management_project_id)
.text-muted
= s_('ClusterIntegration|A cluster management project can be used to run deployment jobs with Kubernetes <code>cluster-admin</code> privileges.').html_safe
= html_escape(s_('ClusterIntegration|A cluster management project can be used to run deployment jobs with Kubernetes %{code_open}cluster-admin%{code_close} privileges.')) % { code_open: '<code>'.html_safe, code_close: '</code>'.html_safe }
= link_to _('More information'), help_page_path('user/clusters/management_project.md'), target: '_blank'
.gl-display-flex.gl-justify-content-end
= field.submit _('Save changes'), class: 'btn btn-success'
......
......@@ -3,7 +3,7 @@
.row
.col-lg-7
%h1.mb-3.font-weight-bold.text-6.mt-0
= _("Speed up your DevOps<br>with GitLab").html_safe
= html_escape(_("Speed up your DevOps%{br_tag}with GitLab")) % { br_tag: '<br/>'.html_safe }
%p.text-3
= _("GitLab is a single application for the entire software development lifecycle. From project planning and source code management to CI/CD, monitoring, and security.")
.col-lg-5.order-12
......
......@@ -13,7 +13,7 @@
= _('Use one line per URI')
- if Doorkeeper.configuration.native_redirect_uri
%span.form-text.text-muted
= _('Use <code>%{native_redirect_uri}</code> for local tests').html_safe % { native_redirect_uri: Doorkeeper.configuration.native_redirect_uri }
= html_escape(_('Use %{native_redirect_uri} for local tests')) % { native_redirect_uri: tag.code(Doorkeeper.configuration.native_redirect_uri) }
.form-group.form-check
= f.check_box :confidential, class: 'form-check-input'
......
......@@ -11,7 +11,7 @@
.text-warning
%p
= icon("exclamation-triangle fw")
= _('You are an admin, which means granting access to <strong>%{client_name}</strong> will allow them to interact with GitLab as an admin as well. Proceed with caution.').html_safe % { client_name: @pre_auth.client.name }
= html_escape(_('You are an admin, which means granting access to %{client_name} will allow them to interact with GitLab as an admin as well. Proceed with caution.')) % { client_name: tag.strong(@pre_auth.client.name) }
%p
- link_to_client = link_to(@pre_auth.client.name, @pre_auth.redirect_uri, target: '_blank', rel: 'noopener noreferrer')
= _("An application called %{link_to_client} is requesting access to your GitLab account.").html_safe % { link_to_client: link_to_client }
......
......@@ -9,7 +9,7 @@
- if ssh_info.blank?
%p
= _('SSH host keys are not available on this system. Please use <code>ssh-keyscan</code> command or contact your GitLab administrator for more information.').html_safe
= html_escape(_('SSH host keys are not available on this system. Please use %{ssh_keyscan} command or contact your GitLab administrator for more information.')) % { ssh_keyscan: tag.code('ssh-keyscan') }
- else
%p
= _('Below are the fingerprints for the current instance SSH host keys.')
......
- content_for(:page_title, _('Welcome to GitLab %{name}!') % { name: current_user.name })
.text-center.mb-3
= _('In order to tailor your experience with GitLab we<br>would like to know a bit more about you.').html_safe
= html_escape(_('In order to tailor your experience with GitLab we%{br_tag}would like to know a bit more about you.')) % { br_tag: '<br/>'.html_safe }
.signup-box.p-3.mb-2
.signup-body
= form_for(current_user, url: users_sign_up_update_registration_path, html: { class: 'new_new_user gl-show-field-errors', 'aria-live' => 'assertive' }) do |f|
......
......@@ -8,7 +8,7 @@
.modal-body
%p
= _('<strong>%{label_name}</strong> <span>will be permanently deleted from %{subject_name}. This cannot be undone.</span>').html_safe % { label_name: label.name, subject_name: label.subject_name }
= html_escape(_('%{label_name} %{span_open}will be permanently deleted from %{subject_name}. This cannot be undone.%{span_close}')) % { label_name: tag.strong(label.name), subject_name: label.subject_name, span_open: '<span>'.html_safe, span_close: '</span>'.html_safe }
.modal-footer
%a{ href: '#', data: { dismiss: 'modal' }, class: 'btn btn-default' }= _('Cancel')
......
......@@ -27,6 +27,6 @@
%span.issuable-note-warning
= sprite_icon('lock', size: 16, css_class: 'icon')
%span
= _("This %{issuable} is locked. Only <strong>project members</strong> can comment.").html_safe % { issuable: issuable.class.to_s.titleize.downcase }
= html_escape(_("This %{issuable} is locked. Only %{strong_open}project members%{strong_close} can comment.")) % { issuable: issuable.class.to_s.titleize.downcase, strong_open: '<strong>'.html_safe, strong_close: '</strong>'.html_safe }
-# haml-lint:disable InlineJavaScript
%script.js-notes-data{ type: "application/json" }= initial_notes_data(autocomplete).to_json.html_safe
......@@ -59,7 +59,7 @@
- link_example = '[[page-slug]]'
- else
- link_example = '[Link Title](page-slug)'
= (s_('WikiMarkdownTip|To link to a (new) page, simply type <code class="js-markup-link-example">%{link_example}</code>') % { link_example: link_example }).html_safe
= html_escape(s_('WikiMarkdownTip|To link to a (new) page, simply type %{link_example}')) % { link_example: tag.code(link_example, class: 'js-markup-link-example') }
= succeed '.' do
- markdown_link = link_to s_("WikiMarkdownDocs|documentation"), help_page_path('user/markdown', anchor: 'wiki-specific-markdown')
= (s_("WikiMarkdownDocs|More examples are in the %{docs_link}") % { docs_link: markdown_link }).html_safe
......
%h4.prepend-top-20
= _("Contributions for <strong>%{calendar_date}</strong>").html_safe % { calendar_date: @calendar_date.to_s(:medium) }
= html_escape(_("Contributions for %{calendar_date}")) % { calendar_date: tag.strong(@calendar_date.to_s(:medium)) }
- if @events.any?
%ul.bordered-list
......
---
title: Removes the old UI page
merge_request: 38277
author:
type: other
---
title: Fix review app links not being shown in MR widgets in public projects
merge_request: 37923
author:
type: fixed
---
title: Hide languages with few translations
merge_request: 38312
author:
type: changed
get 'help' => 'help#index'
get 'help/shortcuts' => 'help#shortcuts'
get 'help/ui' => 'help#ui'
get 'help/instance_configuration' => 'help#instance_configuration'
get 'help/*path' => 'help#show', as: :help_page
......@@ -13,7 +13,7 @@ class DeleteUserCalloutAlertsMoved < ActiveRecord::Migration[6.0]
BATCH_SIZE = 1_000
# Inlined from Enums::UserCallout.feature_names
# Inlined from UserCalloutEnums.feature_names
FEATURE_NAME_ALERTS_MOVED = 20
def up
......
---
type: reference
---
# How to set up Consul **(PREMIUM ONLY)**
A Consul cluster consists of both
[server and client agents](https://www.consul.io/docs/agent).
The servers run on their own nodes and the clients run on other nodes that in
turn communicate with the servers.
GitLab Premium includes a bundled version of [Consul](https://www.consul.io/),
a service networking solution that you can manage by using `/etc/gitlab/gitlab.rb`.
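For context on those client agents, the other nodes point their bundled Consul at the same server nodes through `/etc/gitlab/gitlab.rb`. A minimal sketch of that client-side configuration (the addresses are placeholders and should match the `retry_join` list used when configuring the server nodes below):

```ruby
# /etc/gitlab/gitlab.rb on a client node
consul['enable'] = true
consul['configuration'] = {
  retry_join: %w(10.10.10.1 consul1.gitlab.example.com 10.10.10.2)
}
```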
## Configure the Consul nodes
NOTE: **Important:**
Before proceeding, refer to the
[available reference architectures](reference_architectures/index.md#available-reference-architectures)
to find out how many Consul server nodes you should have.
On **each** Consul server node perform the following:
1. Follow the instructions to [install](https://about.gitlab.com/install/)
GitLab by choosing your preferred platform, but do not supply the
`EXTERNAL_URL` value when asked.
1. Edit `/etc/gitlab/gitlab.rb`, and add the following, replacing the values
noted in the `retry_join` section. In the example below there are three
nodes, two denoted by their IP and one by its FQDN; you can use either
notation:
```ruby
# Disable all components except Consul
roles ['consul_role']
# Consul nodes: can be FQDN or IP, separated by a whitespace
consul['configuration'] = {
server: true,
retry_join: %w(10.10.10.1 consul1.gitlab.example.com 10.10.10.2)
}
# Disable auto migrations
gitlab_rails['auto_migrate'] = false
```
1. [Reconfigure GitLab](restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes
to take effect.
1. Run the following command to verify that Consul is configured correctly and
that all server nodes are communicating:
```shell
sudo /opt/gitlab/embedded/bin/consul members
```
The output should be similar to:
```plaintext
Node Address Status Type Build Protocol DC
CONSUL_NODE_ONE XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
CONSUL_NODE_TWO XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
CONSUL_NODE_THREE XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
```
If the results display any nodes with a status that isn't `alive`, or if any
of the three nodes are missing, see the [Troubleshooting section](#troubleshooting-consul).
## Upgrade the Consul nodes
To upgrade your Consul nodes, upgrade the GitLab package.
Nodes should be:
- Members of a healthy cluster prior to upgrading the Omnibus GitLab package.
- Upgraded one node at a time.
Identify any existing health issues in the cluster by running the following command
within each node. The command will return an empty array if the cluster is healthy:
```shell
curl http://127.0.0.1:8500/v1/health/state/critical
```
Consul nodes communicate using the raft protocol. If the current leader goes
offline, there needs to be a leader election. A leader node must exist to facilitate
synchronization across the cluster. If too many nodes go offline at the same time,
the cluster will lose quorum and not elect a leader due to
[broken consensus](https://www.consul.io/docs/internals/consensus.html).
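To confirm the leader election state during an upgrade, the Consul HTTP API exposes it on every node. A small sketch using Ruby's standard library, assuming the default API address of `127.0.0.1:8500` used by the health check above:

```ruby
require 'net/http'
require 'json'

leader = JSON.parse(Net::HTTP.get(URI('http://127.0.0.1:8500/v1/status/leader')))
peers  = JSON.parse(Net::HTTP.get(URI('http://127.0.0.1:8500/v1/status/peers')))

# An empty leader string means no leader is currently elected,
# for example because quorum was lost.
puts "leader: #{leader.empty? ? 'none' : leader}"
puts "voting peers: #{peers.size}"
```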
Consult the [troubleshooting section](#troubleshooting-consul) if the cluster is not
able to recover after the upgrade. The [outage recovery](#outage-recovery) may
be of particular interest.
NOTE: **Note:**
GitLab uses Consul to store only transient data that is easily regenerated. If
the bundled Consul was not used by any process other than GitLab itself, then
[rebuilding the cluster from scratch](#recreate-from-scratch) is fine.
## Troubleshooting Consul
Below are some useful operations should you need to debug any issues.
You can see any error logs by running:
```shell
sudo gitlab-ctl tail consul
```
### Check the cluster membership
To determine which nodes are part of the cluster, run the following on any member in the cluster:
```shell
sudo /opt/gitlab/embedded/bin/consul members
```
The output should be similar to:
```plaintext
Node Address Status Type Build Protocol DC
consul-b XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
consul-c XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
consul-c XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
db-a XX.XX.X.Y:8301 alive client 0.9.0 2 gitlab_consul
db-b XX.XX.X.Y:8301 alive client 0.9.0 2 gitlab_consul
```
Ideally all nodes will have a `Status` of `alive`.
### Restart Consul
If it is necessary to restart Consul, it is important to do this in
a controlled manner to maintain quorum. If quorum is lost, you will need to follow
the Consul [outage recovery](#outage-recovery) process to recover the cluster.
To be safe, it's recommended that you only restart Consul on one node at a time to
ensure the cluster remains intact. For larger clusters, it is possible to restart
multiple nodes at a time. See the
[Consul consensus document](https://www.consul.io/docs/internals/consensus.html#deployment-table)
for how many failures it can tolerate. This will be the number of simultaneous
restarts it can sustain.
To restart Consul:
```shell
sudo gitlab-ctl restart consul
```
### Consul nodes unable to communicate
By default, Consul will attempt to
[bind](https://www.consul.io/docs/agent/options.html#_bind) to `0.0.0.0`, but
it will advertise the first private IP address on the node for other Consul nodes
to communicate with it. If the other nodes cannot communicate with a node on
this address, then the cluster will have a failed status.
If you are running into this issue, you will see messages like the following in `gitlab-ctl tail consul` output:
```plaintext
2017-09-25_19:53:39.90821 2017/09/25 19:53:39 [WARN] raft: no known peers, aborting election
2017-09-25_19:53:41.74356 2017/09/25 19:53:41 [ERR] agent: failed to sync remote state: No cluster leader
```
To fix this:
1. Pick an address on each node that all of the other nodes can reach this node through.
1. Update your `/etc/gitlab/gitlab.rb`
```ruby
consul['configuration'] = {
...
bind_addr: 'IP ADDRESS'
}
```
1. Reconfigure GitLab:
```shell
gitlab-ctl reconfigure
```
If you still see the errors, you may have to
[erase the Consul database and reinitialize](#recreate-from-scratch) on the affected node.
### Consul does not start - multiple private IPs
If a node has multiple private IPs, Consul will be confused as to
which of the private addresses to advertise, and then immediately exit on start.
You will see messages like the following in `gitlab-ctl tail consul` output:
```plaintext
2017-11-09_17:41:45.52876 ==> Starting Consul agent...
2017-11-09_17:41:45.53057 ==> Error creating agent: Failed to get advertise address: Multiple private IPs found. Please configure one.
```
To fix this:
1. Pick an address on the node that all of the other nodes can reach this node through.
1. Update your `/etc/gitlab/gitlab.rb`
```ruby
consul['configuration'] = {
...
bind_addr: 'IP ADDRESS'
}
```
1. Reconfigure GitLab:
```shell
gitlab-ctl reconfigure
```
### Outage recovery
If you lost enough Consul nodes in the cluster to break quorum, then the cluster
is considered failed, and it will not function without manual intervention.
In that case, you can either recreate the nodes from scratch or attempt a
recovery.
#### Recreate from scratch
By default, GitLab does not store anything in the Consul node that cannot be
recreated. To erase the Consul database and reinitialize:
```shell
sudo gitlab-ctl stop consul
sudo rm -rf /var/opt/gitlab/consul/data
sudo gitlab-ctl start consul
```
After this, the node should start back up, and the rest of the server agents rejoin.
Shortly after that, the client agents should rejoin as well.
#### Recover a failed node
If you have taken advantage of Consul to store other data and want to restore
the failed node, follow the
[Consul guide](https://learn.hashicorp.com/consul/day-2-operations/outage)
to recover a failed cluster.
......@@ -137,7 +137,7 @@ documentation:
synchronized from the **primary** node.
NOTE: **Note:**
[NFS](../../high_availability/nfs.md) can be used in place of Gitaly but is not
[NFS](../../nfs.md) can be used in place of Gitaly but is not
recommended.
### Step 2: Configure the main read-only replica PostgreSQL database on the **secondary** node
......@@ -304,9 +304,9 @@ In the architecture overview, there are two machines running the GitLab
application services. These services are enabled selectively in the
configuration.
Configure the application servers following
[Configuring GitLab for multiple nodes](../../high_availability/gitlab.md), then make the
following modifications:
Configure the GitLab Rails application servers following the relevant steps
outlined in the [reference architectures](../../reference_architectures/index.md),
then make the following modifications:
1. Edit `/etc/gitlab/gitlab.rb` on each application server in the **secondary**
cluster, and add the following:
......@@ -393,7 +393,7 @@ application servers.
In this topology, a load balancer is required at each geographic location to
route traffic to the application servers.
See [Load Balancer for GitLab with multiple nodes](../../high_availability/load_balancer.md) for
See [Load Balancer for GitLab with multiple nodes](../../load_balancer.md) for
more information.
### Step 6: Configure the backend application servers on the **secondary** node
......
......@@ -376,7 +376,7 @@ This can be risky because anything that prevents your Gitaly clients from reachi
servers will cause all Gitaly requests to fail. For example, any sort of network, firewall, or name
resolution problems.
Additionally, you must [disable Rugged](../high_availability/nfs.md#improving-nfs-performance-with-gitlab)
Additionally, you must [disable Rugged](../nfs.md#improving-nfs-performance-with-gitlab)
if previously enabled manually.
Gitaly makes the following assumptions:
......
......@@ -2,4 +2,4 @@
redirect_to: 'database.md'
---
This document was moved to [another location](database.md).
This document was moved to [another location](../postgresql/index.md).
---
type: reference
redirect_to: ../consul.md
---
# Working with the bundled Consul service **(PREMIUM ONLY)**
As part of its High Availability stack, GitLab Premium includes a bundled version of [Consul](https://www.consul.io/) that can be managed through `/etc/gitlab/gitlab.rb`. Consul is a service networking solution. When it comes to [GitLab Architecture](../../development/architecture.md), Consul utilization is supported for configuring:
1. [Monitoring in Scaled and Highly Available environments](monitoring_node.md)
1. [PostgreSQL High Availability with Omnibus](../postgresql/replication_and_failover.md)
A Consul cluster consists of multiple server agents, as well as client agents that run on other nodes which need to talk to the Consul cluster.
## Prerequisites
First, make sure to [download/install](https://about.gitlab.com/install/)
Omnibus GitLab **on each node**.
Choose an installation method, then make sure you complete steps:
1. Install and configure the necessary dependencies.
1. Add the GitLab package repository and install the package.
When installing the GitLab package, do not supply `EXTERNAL_URL` value.
## Configuring the Consul nodes
On each Consul node perform the following:
1. Make sure you collect [`CONSUL_SERVER_NODES`](../postgresql/replication_and_failover.md#consul-information), which are the IP addresses or DNS records of the Consul server nodes, before executing the next step.
1. Edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
```ruby
# Disable all components except Consul
roles ['consul_role']
# START user configuration
# Replace placeholders:
#
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses gathered for CONSUL_SERVER_NODES
consul['configuration'] = {
server: true,
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z)
}
# Disable auto migrations
gitlab_rails['auto_migrate'] = false
#
# END user configuration
```
> `consul_role` was introduced with GitLab 10.3
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes
to take effect.
### Consul checkpoint
Before moving on, make sure Consul is configured correctly. Run the following
command to verify all server nodes are communicating:
```shell
/opt/gitlab/embedded/bin/consul members
```
The output should be similar to:
```plaintext
Node Address Status Type Build Protocol DC
CONSUL_NODE_ONE XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
CONSUL_NODE_TWO XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
CONSUL_NODE_THREE XXX.XXX.XXX.YYY:8301 alive server 0.9.2 2 gitlab_consul
```
If any of the nodes isn't `alive` or if any of the three nodes are missing,
check the [Troubleshooting section](#troubleshooting) before proceeding.
## Operations
### Checking cluster membership
To see which nodes are part of the cluster, run the following on any member in the cluster:
```shell
$ /opt/gitlab/embedded/bin/consul members
Node Address Status Type Build Protocol DC
consul-b XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
consul-c XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
consul-c XX.XX.X.Y:8301 alive server 0.9.0 2 gitlab_consul
db-a XX.XX.X.Y:8301 alive client 0.9.0 2 gitlab_consul
db-b XX.XX.X.Y:8301 alive client 0.9.0 2 gitlab_consul
```
Ideally all nodes will have a `Status` of `alive`.
### Restarting the server cluster
NOTE: **Note:**
This section only applies to server agents. It is safe to restart client agents whenever needed.
If it is necessary to restart the server cluster, it is important to do this in a controlled fashion in order to maintain quorum. If quorum is lost, you will need to follow the Consul [outage recovery](#outage-recovery) process to recover the cluster.
To be safe, we recommend you only restart one server agent at a time to ensure the cluster remains intact.
For larger clusters, it is possible to restart multiple agents at a time. See the [Consul consensus document](https://www.consul.io/docs/internals/consensus.html#deployment-table) for how many failures it can tolerate. This will be the number of simultaneous restarts it can sustain.
## Upgrades for bundled Consul
Nodes running GitLab-bundled Consul should be:
- Members of a healthy cluster prior to upgrading the Omnibus GitLab package.
- Upgraded one node at a time.
NOTE: **Note:**
Running `curl http://127.0.0.1:8500/v1/health/state/critical` from any Consul node will identify existing health issues in the cluster. The command will return an empty array if the cluster is healthy.
Consul clusters communicate using the raft protocol. If the current leader goes offline, there needs to be a leader election. A leader node must exist to facilitate synchronization across the cluster. If too many nodes go offline at the same time, the cluster will lose quorum and not elect a leader due to [broken consensus](https://www.consul.io/docs/internals/consensus.html).
Consult the [troubleshooting section](#troubleshooting) if the cluster is not able to recover after the upgrade. The [outage recovery](#outage-recovery) may be of particular interest.
NOTE: **Note:**
GitLab only uses Consul to store transient data that is easily regenerated. If the bundled Consul was not used by any process other than GitLab itself, then [rebuilding the cluster from scratch](#recreate-from-scratch) is fine.
## Troubleshooting
### Consul server agents unable to communicate
By default, the server agents will attempt to [bind](https://www.consul.io/docs/agent/options.html#_bind) to '0.0.0.0', but they will advertise the first private IP address on the node for other agents to communicate with them. If the other nodes cannot communicate with a node on this address, then the cluster will have a failed status.
You will see messages like the following in `gitlab-ctl tail consul` output if you are running into this issue:
```plaintext
2017-09-25_19:53:39.90821 2017/09/25 19:53:39 [WARN] raft: no known peers, aborting election
2017-09-25_19:53:41.74356 2017/09/25 19:53:41 [ERR] agent: failed to sync remote state: No cluster leader
```
To fix this:
1. Pick an address on each node that all of the other nodes can reach this node through.
1. Update your `/etc/gitlab/gitlab.rb`
```ruby
consul['configuration'] = {
...
bind_addr: 'IP ADDRESS'
}
```
1. Run `gitlab-ctl reconfigure`
If you still see the errors, you may have to [erase the Consul database and reinitialize](#recreate-from-scratch) on the affected node.
### Consul agents do not start - Multiple private IPs
In the case that a node has multiple private IPs, the agent will be confused as to which of the private addresses to advertise, and then immediately exit on start.
You will see messages like the following in `gitlab-ctl tail consul` output if you are running into this issue:
```plaintext
2017-11-09_17:41:45.52876 ==> Starting Consul agent...
2017-11-09_17:41:45.53057 ==> Error creating agent: Failed to get advertise address: Multiple private IPs found. Please configure one.
```
To fix this:
1. Pick an address on the node that all of the other nodes can reach this node through.
1. Update your `/etc/gitlab/gitlab.rb`
```ruby
consul['configuration'] = {
...
bind_addr: 'IP ADDRESS'
}
```
1. Run `gitlab-ctl reconfigure`
### Outage recovery
If you lost enough server agents in the cluster to break quorum, then the cluster is considered failed, and it will not function without manual intervention.
#### Recreate from scratch
By default, GitLab does not store anything in the Consul cluster that cannot be recreated. To erase the Consul database and reinitialize:
```shell
gitlab-ctl stop consul
rm -rf /var/opt/gitlab/consul/data
gitlab-ctl start consul
```
After this, the cluster should start back up, and the server agents rejoin. Shortly after that, the client agents should rejoin as well.
#### Recover a failed cluster
If you have taken advantage of Consul to store other data, and want to restore the failed cluster, please follow the [Consul guide](https://learn.hashicorp.com/consul/day-2-operations/outage) to recover a failed cluster.
This document was moved to [another location](../consul.md).
---
stage: Create
group: Gitaly
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers
type: reference
redirect_to: ../gitaly/index.md
---
# Configuring Gitaly for Scaled and High Availability
A [Gitaly Cluster](../gitaly/praefect.md) can be used to increase the fault
tolerance of Gitaly in high availability configurations.
This document is relevant for [scalable and highly available setups](../reference_architectures/index.md).
## Running Gitaly on its own server
See [Run Gitaly on its own server](../gitaly/index.md#run-gitaly-on-its-own-server)
in Gitaly documentation.
Continue configuration of other components by going back to the
[reference architecture](../reference_architectures/index.md#configure-gitlab-to-scale) page.
## Enable Monitoring
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
1. Make sure to collect [`CONSUL_SERVER_NODES`](../postgresql/replication_and_failover.md#consul-information), which are the IP addresses or DNS records of the Consul server nodes, for the next step. Note they are presented as `Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z`
1. Create/edit `/etc/gitlab/gitlab.rb` and add the following configuration:
```ruby
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '0.0.0.0:9100'
gitaly['prometheus_listen_addr'] = "0.0.0.0:9236"
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
This document was moved to [another location](../gitaly/index.md).
---
type: reference
redirect_to: ../reference_architectures/index.md
---
# Configuring GitLab application (Rails)
This section describes how to configure the GitLab application (Rails) component.
NOTE: **Note:**
There is some additional configuration near the bottom for
additional GitLab application servers. It's important to read and understand
these additional steps before proceeding with GitLab installation.
NOTE: **Note:**
[Cloud Object Storage service](object_storage.md) with [Gitaly](gitaly.md)
is recommended over [NFS](nfs.md) wherever possible for improved performance.
1. If necessary, install the NFS client utility packages using the following
commands:
```shell
# Ubuntu/Debian
apt-get install nfs-common
# CentOS/Red Hat
yum install nfs-utils nfs-utils-lib
```
1. Specify the necessary NFS exports in `/etc/fstab`.
The exact contents of `/etc/fstab` will depend on how you chose
to configure your NFS server. See [NFS documentation](nfs.md#nfs-client-mount-options)
for examples and the various options.
1. Create the shared directories. These may be different depending on your NFS
mount locations.
```shell
mkdir -p /var/opt/gitlab/.ssh /var/opt/gitlab/gitlab-rails/uploads /var/opt/gitlab/gitlab-rails/shared /var/opt/gitlab/gitlab-ci/builds /var/opt/gitlab/git-data
```
1. Download/install Omnibus GitLab using **steps 1 and 2** from
[GitLab downloads](https://about.gitlab.com/install/). Do not complete other
steps on the download page.
1. Create/edit `/etc/gitlab/gitlab.rb` and use the following configuration.
Be sure to change the `external_url` to match your eventual GitLab front-end
URL. Depending on your NFS configuration, you may need to change some GitLab
data locations. See [NFS documentation](nfs.md) for `/etc/gitlab/gitlab.rb`
configuration values for various scenarios. The example below assumes you've
added NFS mounts in the default data locations. Additionally, the UIDs and GIDs
given are just examples and you should configure them with your preferred values.
```ruby
external_url 'https://gitlab.example.com'
# Prevent GitLab from starting if NFS data mounts are not available
high_availability['mountpoint'] = '/var/opt/gitlab/git-data'
# Disable components that will not be on the GitLab application server
roles ['application_role']
nginx['enable'] = true
# PostgreSQL connection details
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_encoding'] = 'unicode'
gitlab_rails['db_host'] = '10.1.0.5' # IP/hostname of database server
gitlab_rails['db_password'] = 'DB password'
# Redis connection details
gitlab_rails['redis_port'] = '6379'
gitlab_rails['redis_host'] = '10.1.0.6' # IP/hostname of Redis server
gitlab_rails['redis_password'] = 'Redis Password'
# Ensure UIDs and GIDs match between servers for permissions via NFS
user['uid'] = 9000
user['gid'] = 9000
web_server['uid'] = 9001
web_server['gid'] = 9001
registry['uid'] = 9002
registry['gid'] = 9002
```
1. [Enable monitoring](#enable-monitoring)
NOTE: **Note:**
To maintain uniformity of links across HA clusters, the `external_url`
on the first application server as well as the additional application
servers should point to the external URL that users will use to access GitLab.
In a typical HA setup, this will be the URL of the load balancer which will
route traffic to all GitLab application servers in the HA cluster.
NOTE: **Note:**
When you specify `https` in the `external_url`, as in the example
above, GitLab assumes you have SSL certificates in `/etc/gitlab/ssl/`. If
certificates are not present, NGINX will fail to start. See
[NGINX documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for more information.
NOTE: **Note:**
It is best to set the `uid` and `gid` values prior to the initial reconfigure
of GitLab. Omnibus will not recursively `chown` directories if set after the initial reconfigure.
## First GitLab application server
On the first application server, run:
```shell
sudo gitlab-ctl reconfigure
```
This should compile the configuration and initialize the database. Do
not run this on additional application servers until the next step.
## Extra configuration for additional GitLab application servers
Additional GitLab servers (servers configured **after** the first GitLab server)
need some extra configuration.
1. Configure shared secrets. These values can be obtained from the primary
GitLab server in `/etc/gitlab/gitlab-secrets.json`. Copy this file to the
secondary servers **prior to** running the first `reconfigure` in the steps
above.
```ruby
gitlab_shell['secret_token'] = 'fbfb19c355066a9afb030992231c4a363357f77345edd0f2e772359e5be59b02538e1fa6cae8f93f7d23355341cea2b93600dab6d6c3edcdced558fc6d739860'
gitlab_rails['otp_key_base'] = 'b719fe119132c7810908bba18315259ed12888d4f5ee5430c42a776d840a396799b0a5ef0a801348c8a357f07aa72bbd58e25a84b8f247a25c72f539c7a6c5fa'
gitlab_rails['secret_key_base'] = '6e657410d57c71b4fc3ed0d694e7842b1895a8b401d812c17fe61caf95b48a6d703cb53c112bc01ebd197a85da81b18e29682040e99b4f26594772a4a2c98c6d'
gitlab_rails['db_key_base'] = 'bf2e47b68d6cafaef1d767e628b619365becf27571e10f196f98dc85e7771042b9203199d39aff91fcb6837c8ed83f2a912b278da50999bb11a2fbc0fba52964'
```
1. Run `touch /etc/gitlab/skip-auto-reconfigure` to prevent database migrations
from running on upgrade. Only the primary GitLab application server should
handle migrations.
1. **Recommended** Configure host keys. Copy the contents (private and public keys) of `/etc/ssh/` on
the primary application server to `/etc/ssh` on all secondary servers. This
prevents false man-in-the-middle-attack alerts when accessing servers in your
High Availability cluster behind a load balancer.
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
NOTE: **Note:**
You will need to restart the GitLab application nodes after an update has occurred and database
migrations have been performed.
## Enable Monitoring
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
If you enable Monitoring, it must be enabled on **all** GitLab servers.
1. Make sure to collect [`CONSUL_SERVER_NODES`](../postgresql/replication_and_failover.md#consul-information), which are the IP addresses or DNS records of the Consul server nodes, for the next step. Note they are presented as `Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z`
1. Create/edit `/etc/gitlab/gitlab.rb` and add the following configuration:
```ruby
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '0.0.0.0:9100'
gitlab_workhorse['prometheus_listen_addr'] = '0.0.0.0:9229'
sidekiq['listen_address'] = "0.0.0.0"
puma['listen'] = '0.0.0.0'
# Add the monitoring node's IP address to the monitoring list that allows it to
# scrape the NGINX metrics. Replace placeholder `monitoring.gitlab.example.com` with
# the address and/or subnets gathered from the monitoring node(s).
gitlab_rails['monitoring_whitelist'] = ['monitoring.gitlab.example.com', '127.0.0.0/8']
nginx['status']['options']['allow'] = ['monitoring.gitlab.example.com', '127.0.0.0/8']
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
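As an optional sanity check after the reconfigure, you can confirm from the node itself
that the exporters answer on the ports configured above (a sketch; adjust the ports if
you changed them):
```shell
# Each exporter should serve Prometheus metrics on its configured port.
curl --silent http://localhost:9100/metrics | head   # node_exporter
curl --silent http://localhost:9229/metrics | head   # gitlab-workhorse
```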
CAUTION: **Warning:**
If running Unicorn, after changing `unicorn['listen']` in `gitlab.rb`, and
running `sudo gitlab-ctl reconfigure`, it can take an extended period of time
for Unicorn to complete reloading after receiving a `HUP`. For more
information, see the
[issue](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/4401).
## Troubleshooting
- `mount: wrong fs type, bad option, bad superblock on`
You have not installed the necessary NFS client utilities. See step 1 above.
- `mount: mount point /var/opt/gitlab/... does not exist`
This particular directory does not exist on the NFS server. Ensure
the share is exported and exists on the NFS server and try to remount.
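When diagnosing either error, it can also help to confirm what the NFS server is
actually exporting, as seen from the client (assumes the NFS client utilities are installed):
```shell
# List the exports offered by the NFS server.
showmount -e <host_ip_address>
```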
---
## Upgrading GitLab HA
GitLab HA installations can be upgraded with no downtime, but the
upgrade process must be carefully coordinated to avoid failures. See the
[Omnibus GitLab multi-node upgrade
document](https://docs.gitlab.com/omnibus/update/#multi-node--ha-deployment)
for more details.
Read more on high-availability configuration:
1. [Configure the database](../postgresql/replication_and_failover.md)
1. [Configure Redis](redis.md)
1. [Configure NFS](nfs.md)
1. [Configure the load balancers](load_balancer.md)
This document was moved to [another location](../reference_architectures/index.md).
---
type: reference
redirect_to: ../load_balancer.md
---
# Load Balancer for multi-node GitLab
In a multi-node GitLab configuration, you will need a load balancer to route
traffic to the application servers. The specifics of which load balancer to use,
or its exact configuration, are beyond the scope of GitLab documentation. We hope
that if you're managing HA systems like GitLab, you already have a load balancer of
choice. Some examples include HAProxy (open source), F5 BIG-IP LTM, and
Citrix NetScaler. This documentation will outline what ports and protocols
you need to use with GitLab.
## SSL
How will you handle SSL in your multi-node environment? There are several different
options:
- Each application node terminates SSL
- The load balancer(s) terminate SSL and communication is not secure between
the load balancer(s) and the application nodes
- The load balancer(s) terminate SSL and communication is *secure* between the
load balancer(s) and the application nodes
### Application nodes terminate SSL
Configure your load balancer(s) to pass connections on port 443 as 'TCP' rather
than 'HTTP(S)' protocol. This will pass the connection to the application node's
NGINX service untouched. NGINX will have the SSL certificate and listen on port 443.
See [NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
### Load Balancer(s) terminate SSL without backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will then be responsible for managing SSL certificates and
terminating SSL.
Since communication between the load balancer(s) and GitLab will not be secure,
there is some additional configuration needed. See
[NGINX Proxied SSL documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl)
for details.
### Load Balancer(s) terminate SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will be responsible for managing SSL certificates that
end users will see.
Traffic will also be secure between the load balancer(s) and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection will be secure all the way. However, configuration will need to be
added to GitLab to configure SSL certificates. See
[NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
## Ports
### Basic ports
| LB Port | Backend Port | Protocol |
| ------- | ------------ | ------------------------ |
| 80 | 80 | HTTP (*1*) |
| 443 | 443 | TCP or HTTPS (*1*) (*2*) |
| 22 | 22 | TCP |
- (*1*): [Web terminal](../../ci/environments/index.md#web-terminals) support requires
your load balancer to correctly handle WebSocket connections. When using
HTTP or HTTPS proxying, this means your load balancer must be configured
to pass through the `Connection` and `Upgrade` hop-by-hop headers. See the
[web terminal](../integration/terminal.md) integration guide for
more details.
- (*2*): When using HTTPS protocol for port 443, you will need to add an SSL
certificate to the load balancers. If you wish to terminate SSL at the
GitLab application server instead, use TCP protocol.
### GitLab Pages Ports
If you're using GitLab Pages with custom domain support you will need some
additional port configurations.
GitLab Pages requires a separate virtual IP address. Configure DNS to point the
`pages_external_url` from `/etc/gitlab/gitlab.rb` at the new virtual IP address. See the
[GitLab Pages documentation](../pages/index.md) for more information.
| LB Port | Backend Port | Protocol |
| ------- | ------------- | --------- |
| 80 | Varies (*1*) | HTTP |
| 443 | Varies (*1*) | TCP (*2*) |
- (*1*): The backend port for GitLab Pages depends on the
  `gitlab_pages['external_http']` and `gitlab_pages['external_https']`
  settings. See [GitLab Pages documentation](../pages/index.md) for more details.
- (*2*): Port 443 for GitLab Pages should always use the TCP protocol. Users can
configure custom domains with custom SSL, which would not be possible
if SSL was terminated at the load balancer.
### Alternate SSH Port
Some organizations have policies against opening SSH port 22. In this case,
it may be helpful to configure an alternate SSH hostname that allows users
to use SSH on port 443. An alternate SSH hostname will require a new virtual IP address
compared to the other GitLab HTTP configuration above.
Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
| LB Port | Backend Port | Protocol |
| ------- | ------------ | -------- |
| 443 | 22 | TCP |
## Readiness check
It is strongly recommended that multi-node deployments configure load balancers to utilize the [readiness check](../../user/admin_area/monitoring/health_check.md#readiness) to ensure a node is ready to accept traffic before routing traffic to it. This is especially important when utilizing Puma, as there is a brief period during a restart where Puma will not accept requests.
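For illustration, a readiness probe against an application node might look like the
following; it assumes the probing host is allowed by `gitlab_rails['monitoring_whitelist']`:
```shell
# Probe the readiness endpoint on an application node.
# A node that is ready to accept traffic returns HTTP 200.
curl --silent --show-error "https://gitlab.example.com/-/readiness"
```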
---
Read more on high-availability configuration:
1. [Configure the database](../postgresql/replication_and_failover.md)
1. [Configure Redis](redis.md)
1. [Configure NFS](nfs.md)
1. [Configure the GitLab application servers](gitlab.md)
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
This document was moved to [another location](../load_balancer.md).
---
type: reference
redirect_to: ../monitoring/prometheus/index.md
---
# Configuring a Monitoring node for Scaling and High Availability
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
You can configure a Prometheus node to monitor GitLab.
## Standalone Monitoring node using Omnibus GitLab
The Omnibus GitLab package can be used to configure a standalone Monitoring node running [Prometheus](../monitoring/prometheus/index.md) and [Grafana](../monitoring/performance/grafana_configuration.md).
The monitoring node is not highly available. See [Scaling and High Availability](../reference_architectures/index.md)
for an overview of GitLab scaling and high availability options.
The steps below are the minimum necessary to configure a Monitoring node running Prometheus and Grafana with
Omnibus:
1. SSH into the Monitoring node.
1. [Download/install](https://about.gitlab.com/install/) the Omnibus GitLab
package you want using **steps 1 and 2** from the GitLab downloads page.
- Do not complete any other steps on the download page.
1. Make sure to collect [`CONSUL_SERVER_NODES`](../postgresql/replication_and_failover.md#consul-information), which are the IP addresses or DNS records of the Consul server nodes, for the next step. Note they are presented as `Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z`
1. Edit `/etc/gitlab/gitlab.rb` and add the contents:
```ruby
external_url 'http://gitlab.example.com'
# Enable Prometheus
prometheus['enable'] = true
prometheus['listen_address'] = '0.0.0.0:9090'
prometheus['monitor_kubernetes'] = false
# Enable Login form
grafana['disable_login_form'] = false
# Enable Grafana
grafana['enable'] = true
grafana['admin_password'] = 'toomanysecrets'
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Disable all other services
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_exporter['enable'] = false
gitlab_workhorse['enable'] = false
nginx['enable'] = true
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
sidekiq['enable'] = false
puma['enable'] = false
node_exporter['enable'] = false
gitlab_exporter['enable'] = false
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
The next step is to tell all the other nodes where the monitoring node is:
1. Edit `/etc/gitlab/gitlab.rb`, and add, or find and uncomment the following line:
```ruby
gitlab_rails['prometheus_address'] = '10.0.0.1:9090'
```
Where `10.0.0.1:9090` is the IP address and port of the Prometheus node.
1. Save the file and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to
take effect.
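To confirm on the monitoring node that Prometheus is discovering and scraping the other
nodes, you can query the standard Prometheus HTTP API (a sketch; adjust the address if
Prometheus listens elsewhere):
```shell
# List the scrape targets Prometheus currently knows about and their health.
curl --silent http://localhost:9090/api/v1/targets
```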
## Migrating to Service Discovery
Once monitoring using Service Discovery is enabled with `consul['monitoring_service_discovery'] = true`,
ensure that `prometheus['scrape_configs']` is not set in `/etc/gitlab/gitlab.rb`. Setting both
`consul['monitoring_service_discovery'] = true` and `prometheus['scrape_configs']` in `/etc/gitlab/gitlab.rb`
will result in errors.
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
This document was moved to [another location](../monitoring/prometheus/index.md).
---
type: reference
redirect_to: ../nfs.md
---
# Configuring NFS for GitLab HA
Setting up NFS for a GitLab HA setup allows all application nodes in a cluster
to share the same files and maintain data consistency. Application nodes in an HA
setup act as clients while the NFS server plays host.
> Note: The instructions provided in this documentation allow for setting up a quick
proof of concept, but they leave NFS as a potential single point of failure and are
therefore not recommended for use in production. Explore options such as [Pacemaker
and Corosync](https://clusterlabs.org) for highly available NFS in production.
Below are instructions for setting up an application node (client) in an HA cluster
to read from and write to a central NFS server (host).
NOTE: **Note:**
Using EFS may negatively impact performance. Please review the [relevant documentation](nfs.md#avoid-using-awss-elastic-file-system-efs) for additional details.
## NFS Server Setup
> Follow the instructions below to set up and configure your NFS server.
### Step 1 - Install NFS Server on Host
Installing the `nfs-kernel-server` package allows you to share directories with the clients running the GitLab application.
```shell
apt-get update
apt-get install nfs-kernel-server
```
### Step 2 - Export Host's Home Directory to Client
In this setup we will share the home directory on the host with the client. Edit the exports file as below to share the host's home directory with the client. If you have multiple clients running GitLab, you must list each client's IP address on the same line in the `/etc/exports` file, as shown below.
```plaintext
#/etc/exports for one client
/home <client_ip_address>(rw,sync,no_root_squash,no_subtree_check)
#/etc/exports for three clients
/home <client_ip_address>(rw,sync,no_root_squash,no_subtree_check) <client_2_ip_address>(rw,sync,no_root_squash,no_subtree_check) <client_3_ip_address>(rw,sync,no_root_squash,no_subtree_check)
```
Restart the NFS server after making changes to the `exports` file for the changes
to take effect.
```shell
systemctl restart nfs-kernel-server
```
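Alternatively, you can re-export the shares without a full service restart and review
what is currently exported (a minimal sketch):
```shell
# Re-read /etc/exports and show the active export list.
sudo exportfs -ra
sudo exportfs -v
```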
NOTE: **Note:**
You may need to update your server's firewall. See the [firewall section](#nfs-in-a-firewalled-environment) at the end of this guide.
## Client / GitLab application node Setup
> Follow the instructions below to connect any GitLab Rails application node running
inside your HA environment to the NFS server configured above.
### Step 1 - Install NFS Common on Client
The `nfs-common` package provides NFS functionality without installing the server components,
which we don't need running on the application nodes.
```shell
apt-get update
apt-get install nfs-common
```
### Step 2 - Create Mount Points on Client
Create a directory on the client where we can mount the shared directory from the host.
Please note that if your mount point directory contains any files, they will be hidden
once the remote shares are mounted. An empty, new directory on the client is recommended
for this purpose.
```shell
mkdir -p /nfs/home
```
Confirm that the mount point works by mounting it on the client and checking that
it is mounted with the command below:
```shell
mount <host_ip_address>:/home /nfs/home
df -h
```
### Step 3 - Set up Automatic Mounts on Boot
Edit `/etc/fstab` on the client as below to mount the remote shares automatically at boot.
Note that GitLab requires advisory file locking, which is only supported natively in
NFS version 4. NFSv3 also supports locking as long as Linux Kernel 2.6.5+ is used.
We recommend using version 4 and do not specifically test NFSv3.
See [NFS documentation](nfs.md#nfs-client-mount-options) for guidance on mount options.
```plaintext
#/etc/fstab
<host_ip_address>:/home /nfs/home nfs4 defaults,hard,vers=4.1,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
```
Reboot the client and confirm that the mount point is mounted automatically.
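A quick way to confirm this after the reboot (a sketch, assuming the mount point above):
```shell
# Show the mounted NFS share and the options it was mounted with.
mount | grep /nfs/home
nfsstat -m
```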
NOTE: **Note:**
If you arrived here from our guide to [GitLab Pages on a separate server](../pages/index.md#running-gitlab-pages-on-a-separate-server),
please continue there with the Pages-specific NFS mounts.
The step below covers broader use cases than only sharing Pages data.
### Step 4 - Set up GitLab to Use NFS mounts
When using the default Omnibus configuration, you will need to share four data locations
between all GitLab cluster nodes. No other locations should be shared. Changing the
default file locations in `gitlab.rb` on the client allows you to have one main mount
point and keep all the required locations as subdirectories of it, including the NFS
mount used for `git-data`.
```ruby
git_data_dirs({"default" => {"path" => "/nfs/home/var/opt/gitlab-data/git-data"}})
gitlab_rails['uploads_directory'] = '/nfs/home/var/opt/gitlab-data/uploads'
gitlab_rails['shared_path'] = '/nfs/home/var/opt/gitlab-data/shared'
gitlab_ci['builds_directory'] = '/nfs/home/var/opt/gitlab-data/builds'
```
Save the changes in `gitlab.rb` and run `gitlab-ctl reconfigure`.
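If those locations do not yet exist on the NFS mount, a hypothetical preparation step
could look like the following; the `git:git` ownership is an assumption based on common
Omnibus defaults and should be verified on your nodes:
```shell
# Create the shared data locations referenced in gitlab.rb above.
sudo mkdir -p /nfs/home/var/opt/gitlab-data/{git-data,uploads,shared,builds}
sudo chown -R git:git /nfs/home/var/opt/gitlab-data
```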
## NFS in a Firewalled Environment
If the traffic between your NFS server and NFS client(s) is subject to port filtering
by a firewall, then you will need to reconfigure that firewall to allow NFS communication.
[This guide from TLDP](http://tldp.org/HOWTO/NFS-HOWTO/security.html#FIREWALLS)
covers the basics of using NFS in a firewalled environment. Additionally, we encourage you to
search for and review the specific documentation for your operating system or distribution and your firewall software.
Example for Ubuntu:
Check that NFS traffic from the client is allowed by the firewall on the host by running
the command: `sudo ufw status`. If it's being blocked, then you can allow traffic from a specific
client with the command below.
```shell
sudo ufw allow from <client_ip_address> to any port nfs
```
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
This document was moved to [another location](../nfs.md).
---
type: reference
redirect_to: ../postgresql/pgbouncer.md
---
# Working with the bundled PgBouncer service **(PREMIUM ONLY)**
As part of its High Availability stack, GitLab Premium includes a bundled version of [PgBouncer](http://www.pgbouncer.org/) that can be managed through `/etc/gitlab/gitlab.rb`. PgBouncer is used to seamlessly migrate database connections between servers in a failover scenario. Additionally, it can be used in a non-HA setup to pool connections, speeding up response time while reducing resource usage.
In an HA setup, it's recommended to run a PgBouncer node separately for each database node, with an internal (TCP) load balancer serving each accordingly.
## Operations
### Running PgBouncer as part of an HA GitLab installation
This content has been moved to a [new location](../postgresql/replication_and_failover.md#configuring-the-pgbouncer-node).
### Running PgBouncer as part of a non-HA GitLab installation
1. Generate PGBOUNCER_USER_PASSWORD_HASH with the command `gitlab-ctl pg-password-md5 pgbouncer`
1. Generate SQL_USER_PASSWORD_HASH with the command `gitlab-ctl pg-password-md5 gitlab`. We'll also need to enter the plaintext SQL_USER_PASSWORD later
1. On your database node, ensure the following is set in your `/etc/gitlab/gitlab.rb`
```ruby
postgresql['pgbouncer_user_password'] = 'PGBOUNCER_USER_PASSWORD_HASH'
postgresql['sql_user_password'] = 'SQL_USER_PASSWORD_HASH'
postgresql['listen_address'] = 'XX.XX.XX.Y' # Where XX.XX.XX.Y is the ip address on the node postgresql should listen on
postgresql['md5_auth_cidr_addresses'] = %w(AA.AA.AA.B/32) # Where AA.AA.AA.B is the IP address of the pgbouncer node
```
1. Run `gitlab-ctl reconfigure`
NOTE: **Note:**
If the database was already running, it will need to be restarted after reconfigure by running `gitlab-ctl restart postgresql`.
1. On the node you are running PgBouncer on, make sure the following is set in `/etc/gitlab/gitlab.rb`
```ruby
pgbouncer['enable'] = true
pgbouncer['databases'] = {
gitlabhq_production: {
host: 'DATABASE_HOST',
user: 'pgbouncer',
password: 'PGBOUNCER_USER_PASSWORD_HASH'
}
}
```
1. Run `gitlab-ctl reconfigure`
1. On the node running Puma, make sure the following is set in `/etc/gitlab/gitlab.rb`
```ruby
gitlab_rails['db_host'] = 'PGBOUNCER_HOST'
gitlab_rails['db_port'] = '6432'
gitlab_rails['db_password'] = 'SQL_USER_PASSWORD'
```
1. Run `gitlab-ctl reconfigure`
1. At this point, your instance should connect to the database through PgBouncer. If you are having issues, see the [Troubleshooting](#troubleshooting) section
## Enable Monitoring
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
If you enable Monitoring, it must be enabled on **all** PgBouncer servers.
1. Create/edit `/etc/gitlab/gitlab.rb` and add the following configuration:
```ruby
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '0.0.0.0:9100'
pgbouncer_exporter['listen_address'] = '0.0.0.0:9188'
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
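Optionally, confirm after the reconfigure that the exporters respond on the ports
configured above (a sketch):
```shell
# The PgBouncer exporter should now serve metrics on port 9188.
curl --silent http://localhost:9188/metrics | head
```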
### Interacting with PgBouncer
#### Administrative console
As part of Omnibus GitLab, we provide a command `gitlab-ctl pgb-console` to automatically connect to the PgBouncer administrative console. Please see the [PgBouncer documentation](https://www.pgbouncer.org/usage.html#admin-console) for detailed instructions on how to interact with the console.
To start a session, run
```shell
# gitlab-ctl pgb-console
Password for user pgbouncer:
psql (11.7, server 1.7.2/bouncer)
Type "help" for help.
pgbouncer=#
```
The password you will be prompted for is the `PGBOUNCER_USER_PASSWORD`.
To get some basic information about the instance, run
```shell
pgbouncer=# show databases; show clients; show servers;
name | host | port | database | force_user | pool_size | reserve_pool | pool_mode | max_connections | current_connections
---------------------+-----------+------+---------------------+------------+-----------+--------------+-----------+-----------------+---------------------
gitlabhq_production | 127.0.0.1 | 5432 | gitlabhq_production | | 100 | 5 | | 0 | 1
pgbouncer | | 6432 | pgbouncer | pgbouncer | 2 | 0 | statement | 0 | 0
(2 rows)
type | user | database | state | addr | port | local_addr | local_port | connect_time | request_time | ptr | link
| remote_pid | tls
------+-----------+---------------------+--------+-----------+-------+------------+------------+---------------------+---------------------+-----------+------
+------------+-----
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44590 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x12444c0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44592 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x12447c0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44594 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x1244940 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44706 | 127.0.0.1 | 6432 | 2018-04-24 22:14:22 | 2018-04-24 22:16:31 | 0x1244ac0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44708 | 127.0.0.1 | 6432 | 2018-04-24 22:14:22 | 2018-04-24 22:15:15 | 0x1244c40 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44794 | 127.0.0.1 | 6432 | 2018-04-24 22:15:15 | 2018-04-24 22:15:15 | 0x1244dc0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44798 | 127.0.0.1 | 6432 | 2018-04-24 22:15:15 | 2018-04-24 22:16:31 | 0x1244f40 |
| 0 |
C | pgbouncer | pgbouncer | active | 127.0.0.1 | 44660 | 127.0.0.1 | 6432 | 2018-04-24 22:13:51 | 2018-04-24 22:17:12 | 0x1244640 |
| 0 |
(8 rows)
type | user | database | state | addr | port | local_addr | local_port | connect_time | request_time | ptr | link | rem
ote_pid | tls
------+--------+---------------------+-------+-----------+------+------------+------------+---------------------+---------------------+-----------+------+----
--------+-----
S | gitlab | gitlabhq_production | idle | 127.0.0.1 | 5432 | 127.0.0.1 | 35646 | 2018-04-24 22:15:15 | 2018-04-24 22:17:10 | 0x124dca0 | |
19980 |
(1 row)
```
## Troubleshooting
In case you are experiencing any issues connecting through PgBouncer, the first place to check is always the logs:
```shell
# gitlab-ctl tail pgbouncer
```
Additionally, you can check the output from `show databases` in the [Administrative console](#administrative-console). In the output, you would expect to see values in the `host` field for the `gitlabhq_production` database. Additionally, `current_connections` should be greater than 1.
### Message: `LOG: invalid CIDR mask in address`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-cidr-mask-in-address).
### Message: `LOG: invalid IP mask "md5": Name or service not known`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-ip-mask-md5-name-or-service-not-known).
This document was moved to [another location](../postgresql/pgbouncer.md).
---
type: reference
redirect_to: ../sidekiq.md
---
# Configuring Sidekiq
This section discusses how to configure an external Sidekiq instance.
Sidekiq requires a connection to the Redis, PostgreSQL, and Gitaly instances.
To configure the Sidekiq node:
1. SSH into the Sidekiq server.
1. [Download/install](https://about.gitlab.com/install/) the Omnibus GitLab package
you want using steps 1 and 2 from the GitLab downloads page.
**Do not complete any other steps on the download page.**
1. Open `/etc/gitlab/gitlab.rb` with your editor.
1. Generate the Sidekiq configuration:
```ruby
sidekiq['listen_address'] = "10.10.1.48"
## Optional: Enable extra Sidekiq processes
sidekiq_cluster['enable'] = true
sidekiq_cluster['queue_groups'] = [
  "elastic_indexer"
]
```
1. Set up Sidekiq's connection to Redis:
```ruby
## Must be the same in every sentinel node
redis['master_name'] = 'gitlab-redis'
## The same password for Redis authentication you set up for the master node.
redis['master_password'] = 'YOUR_PASSWORD'
## A list of sentinels with `host` and `port`
gitlab_rails['redis_sentinels'] = [
{'host' => '10.10.1.34', 'port' => 26379},
{'host' => '10.10.1.35', 'port' => 26379},
{'host' => '10.10.1.36', 'port' => 26379},
]
```
1. Set up Sidekiq's connection to Gitaly:
```ruby
git_data_dirs({
'default' => { 'gitaly_address' => 'tcp://gitaly:8075' },
})
gitlab_rails['gitaly_token'] = 'YOUR_TOKEN'
```
1. Set up Sidekiq's connection to PostgreSQL:
```ruby
gitlab_rails['db_host'] = '10.10.1.30'
gitlab_rails['db_password'] = 'YOUR_PASSWORD'
gitlab_rails['db_port'] = '5432'
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_encoding'] = 'unicode'
gitlab_rails['auto_migrate'] = false
```
Remember to add the Sidekiq nodes to PostgreSQL's trusted addresses:
```ruby
postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.10.1.30/32 10.10.1.31/32 10.10.1.32/32 10.10.1.33/32 10.10.1.38/32)
```
1. Disable other services:
```ruby
nginx['enable'] = false
grafana['enable'] = false
prometheus['enable'] = false
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_monitor['enable'] = false
gitlab_workhorse['enable'] = false
nginx['enable'] = false
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
puma['enable'] = false
gitlab_exporter['enable'] = false
```
1. Run `gitlab-ctl reconfigure`.
NOTE: **Note:**
You will need to restart the Sidekiq nodes after an update has occurred and database
migrations have been performed.
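After the reconfigure, a quick way to confirm the node is healthy is to check the
service status and watch it pick up jobs (a sketch):
```shell
# Confirm the Sidekiq service is running and follow its log output.
sudo gitlab-ctl status sidekiq
sudo gitlab-ctl tail sidekiq
```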
## Example configuration
Here's what the resulting `/etc/gitlab/gitlab.rb` would look like:
```ruby
########################################
##### Services Disabled ###
########################################
nginx['enable'] = false
grafana['enable'] = false
prometheus['enable'] = false
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_monitor['enable'] = false
gitlab_workhorse['enable'] = false
nginx['enable'] = false
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
puma['enable'] = false
gitlab_exporter['enable'] = false
########################################
#### Redis ###
########################################
## Must be the same in every sentinel node
redis['master_name'] = 'gitlab-redis'
## The same password for Redis authentication you set up for the master node.
redis['master_password'] = 'YOUR_PASSWORD'
## A list of sentinels with `host` and `port`
gitlab_rails['redis_sentinels'] = [
{'host' => '10.10.1.34', 'port' => 26379},
{'host' => '10.10.1.35', 'port' => 26379},
{'host' => '10.10.1.36', 'port' => 26379},
]
#######################################
### Gitaly ###
#######################################
git_data_dirs({
'default' => { 'gitaly_address' => 'tcp://gitaly:8075' },
})
gitlab_rails['gitaly_token'] = 'YOUR_TOKEN'
#######################################
### Postgres ###
#######################################
gitlab_rails['db_host'] = '10.10.1.30'
gitlab_rails['db_password'] = 'YOUR_PASSWORD'
gitlab_rails['db_port'] = '5432'
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_encoding'] = 'unicode'
gitlab_rails['auto_migrate'] = false
#######################################
### Sidekiq configuration ###
#######################################
sidekiq['listen_address'] = "10.10.1.48"
#######################################
### Monitoring configuration ###
#######################################
consul['enable'] = true
consul['monitoring_service_discovery'] = true
consul['configuration'] = {
bind_addr: '10.10.1.48',
retry_join: %w(10.10.1.34 10.10.1.35 10.10.1.36)
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '10.10.1.48:9100'
# Rails Status for prometheus
gitlab_rails['monitoring_whitelist'] = ['10.10.1.42', '127.0.0.1']
```
## Further reading
Related Sidekiq configuration:
1. [Extra Sidekiq processes](../operations/extra_sidekiq_processes.md)
1. [Using the GitLab-Sidekiq chart](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/)
This document was moved to [another location](../sidekiq.md).
......@@ -57,7 +57,7 @@ through to the next one in the chain. If you installed GitLab using Omnibus, or
from source, starting with GitLab 8.15, this should be done by the default
configuration, so there's no need for you to do anything.
However, if you run a [load balancer](../high_availability/load_balancer.md) in
However, if you run a [load balancer](../load_balancer.md) in
front of GitLab, you may need to make some changes to your configuration. These
guides document the necessary steps for a selection of popular reverse proxies:
......
---
type: reference
---
# Load Balancer for multi-node GitLab
In a multi-node GitLab configuration, you will need a load balancer to route
traffic to the application servers. The specifics of which load balancer to use,
or its exact configuration, are beyond the scope of GitLab documentation. We hope
that if you're managing HA systems like GitLab, you already have a load balancer of
choice. Some examples include HAProxy (open source), F5 BIG-IP LTM, and
Citrix NetScaler. This documentation will outline what ports and protocols
you need to use with GitLab.
## SSL
How will you handle SSL in your multi-node environment? There are several different
options:
- Each application node terminates SSL
- The load balancer(s) terminate SSL and communication is not secure between
the load balancer(s) and the application nodes
- The load balancer(s) terminate SSL and communication is *secure* between the
load balancer(s) and the application nodes
### Application nodes terminate SSL
Configure your load balancer(s) to pass connections on port 443 as 'TCP' rather
than 'HTTP(S)' protocol. This will pass the connection to the application node's
NGINX service untouched. NGINX will have the SSL certificate and listen on port 443.
See [NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
### Load Balancer(s) terminate SSL without backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will then be responsible for managing SSL certificates and
terminating SSL.
Since communication between the load balancer(s) and GitLab will not be secure,
there is some additional configuration needed. See
[NGINX Proxied SSL documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl)
for details.
### Load Balancer(s) terminate SSL with backend SSL
Configure your load balancer(s) to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancer(s) will be responsible for managing SSL certificates that
end users will see.
Traffic will also be secure between the load balancer(s) and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection will be secure all the way. However, configuration will need to be
added to GitLab to configure SSL certificates. See
[NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
## Ports
### Basic ports
| LB Port | Backend Port | Protocol |
| ------- | ------------ | ------------------------ |
| 80 | 80 | HTTP (*1*) |
| 443 | 443 | TCP or HTTPS (*1*) (*2*) |
| 22 | 22 | TCP |
- (*1*): [Web terminal](../ci/environments/index.md#web-terminals) support requires
your load balancer to correctly handle WebSocket connections. When using
HTTP or HTTPS proxying, this means your load balancer must be configured
to pass through the `Connection` and `Upgrade` hop-by-hop headers. See the
[web terminal](integration/terminal.md) integration guide for
more details.
- (*2*): When using HTTPS protocol for port 443, you will need to add an SSL
certificate to the load balancers. If you wish to terminate SSL at the
GitLab application server instead, use TCP protocol.
### GitLab Pages Ports
If you're using GitLab Pages with custom domain support you will need some
additional port configurations.
GitLab Pages requires a separate virtual IP address. Configure DNS to point the
`pages_external_url` from `/etc/gitlab/gitlab.rb` at the new virtual IP address. See the
[GitLab Pages documentation](pages/index.md) for more information.
| LB Port | Backend Port | Protocol |
| ------- | ------------- | --------- |
| 80 | Varies (*1*) | HTTP |
| 443 | Varies (*1*) | TCP (*2*) |
- (*1*): The backend port for GitLab Pages depends on the
  `gitlab_pages['external_http']` and `gitlab_pages['external_https']`
  settings. See [GitLab Pages documentation](pages/index.md) for more details.
- (*2*): Port 443 for GitLab Pages should always use the TCP protocol. Users can
configure custom domains with custom SSL, which would not be possible
if SSL was terminated at the load balancer.
### Alternate SSH Port
Some organizations have policies against opening SSH port 22. In this case,
it may be helpful to configure an alternate SSH hostname that allows users
to use SSH on port 443. An alternate SSH hostname will require a new virtual IP address
compared to the other GitLab HTTP configuration above.
Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
| LB Port | Backend Port | Protocol |
| ------- | ------------ | -------- |
| 443 | 22 | TCP |
## Readiness check
It is strongly recommended that multi-node deployments configure load balancers to utilize the [readiness check](../user/admin_area/monitoring/health_check.md#readiness) to ensure a node is ready to accept traffic before routing traffic to it. This is especially important when utilizing Puma, as there is a brief period during a restart where Puma will not accept requests.
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
......@@ -23,7 +23,7 @@ From left to right, it displays:
details:
![Gitaly profiling using the Performance Bar](img/performance_bar_gitaly_calls.png)
- **Rugged calls**: the time taken (in milliseconds) and the total number of
[Rugged](../../high_availability/nfs.md#improving-nfs-performance-with-gitlab) calls.
[Rugged](../../nfs.md#improving-nfs-performance-with-gitlab) calls.
Click to display a modal window with more details:
![Rugged profiling using the Performance Bar](img/performance_bar_rugged_calls.png)
- **Redis calls**: the time taken (in milliseconds) and the total number of
......
......@@ -109,6 +109,81 @@ prometheus['scrape_configs'] = [
]
```
### Standalone Prometheus using Omnibus GitLab
The Omnibus GitLab package can be used to configure a standalone Monitoring node running Prometheus and [Grafana](../performance/grafana_configuration.md).
The steps below are the minimum necessary to configure a Monitoring node running Prometheus and Grafana with Omnibus GitLab:
1. SSH into the Monitoring node.
1. [Install](https://about.gitlab.com/install/) the Omnibus GitLab
package you want using **steps 1 and 2** from the GitLab downloads page, but
do not follow the remaining steps.
1. Make sure to collect the IP addresses or DNS records of the Consul server nodes, for the next step.
1. Edit `/etc/gitlab/gitlab.rb` and add the contents:
```ruby
external_url 'http://gitlab.example.com'
# Enable Prometheus
prometheus['enable'] = true
prometheus['listen_address'] = '0.0.0.0:9090'
prometheus['monitor_kubernetes'] = false
# Enable Login form
grafana['disable_login_form'] = false
# Enable Grafana
grafana['enable'] = true
grafana['admin_password'] = 'toomanysecrets'
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# The addresses can be IPs or FQDNs
consul['configuration'] = {
retry_join: %w(10.0.0.1 10.0.0.2 10.0.0.3),
}
# Disable all other services
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_exporter['enable'] = false
gitlab_workhorse['enable'] = false
nginx['enable'] = true
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
sidekiq['enable'] = false
puma['enable'] = false
node_exporter['enable'] = false
gitlab_exporter['enable'] = false
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
The next step is to tell all the other nodes where the monitoring node is:
1. Edit `/etc/gitlab/gitlab.rb`, and add, or find and uncomment the following line:
```ruby
gitlab_rails['prometheus_address'] = '10.0.0.1:9090'
```
Where `10.0.0.1:9090` is the IP address and port of the Prometheus node.
1. Save the file and [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to
take effect.
NOTE: **Note:**
Once monitoring using Service Discovery is enabled with `consul['monitoring_service_discovery'] = true`,
ensure that `prometheus['scrape_configs']` is not set in `/etc/gitlab/gitlab.rb`. Setting both
`consul['monitoring_service_discovery'] = true` and `prometheus['scrape_configs']` in `/etc/gitlab/gitlab.rb`
will result in errors.
### Using an external Prometheus server
NOTE: **Note:**
......@@ -128,14 +203,24 @@ To use an external Prometheus server:
1. Set each bundled service's [exporter](#bundled-software-metrics) to listen on a network address, for example:
```ruby
node_exporter['listen_address'] = '0.0.0.0:9100'
gitlab_workhorse['prometheus_listen_addr'] = "0.0.0.0:9229"
# Rails nodes
gitlab_exporter['listen_address'] = '0.0.0.0'
sidekiq['listen_address'] = '0.0.0.0'
gitlab_exporter['listen_port'] = '9168'
node_exporter['listen_address'] = '0.0.0.0:9100'
# Sidekiq nodes
sidekiq['listen_address'] = '0.0.0.0'
# Redis nodes
redis_exporter['listen_address'] = '0.0.0.0:9121'
# PostgreSQL nodes
postgres_exporter['listen_address'] = '0.0.0.0:9187'
# Gitaly nodes
gitaly['prometheus_listen_addr'] = "0.0.0.0:9236"
gitlab_workhorse['prometheus_listen_addr'] = "0.0.0.0:9229"
```
1. Install and set up a dedicated Prometheus instance, if necessary, using the [official installation instructions](https://prometheus.io/docs/prometheus/latest/installation/).
......
......@@ -27,7 +27,7 @@ will _not_ carry over automatically, due to differences between the two applicat
deployments, see [Configuring Puma Settings](https://docs.gitlab.com/omnibus/settings/puma.html#configuring-puma-settings).
For Helm based deployments, see the [Webservice Chart documentation](https://docs.gitlab.com/charts/charts/gitlab/webservice/index.html).
Additionally we strongly recommend that multi-node deployments [configure their load balancers to utilize the readiness check](../high_availability/load_balancer.md#readiness-check) due to a difference between Unicorn and Puma in how they handle connections during a restart of the service.
Additionally we strongly recommend that multi-node deployments [configure their load balancers to utilize the readiness check](../load_balancer.md#readiness-check) due to a difference between Unicorn and Puma in how they handle connections during a restart of the service.
## Performance caveat when using Puma with Rugged
......
......@@ -536,7 +536,7 @@ database encryption. Proceed with caution.
1. Set up a new server. This will become the **Pages server**.
1. Create an [NFS share](../high_availability/nfs_host_client_setup.md)
1. Create an [NFS share](../nfs.md)
on the **Pages server** and configure this share to
allow access from your main **GitLab server**.
Note that the example there is more general and
......
......@@ -32,7 +32,7 @@ If you use a cloud-managed service, or provide your own PostgreSQL instance:
gitlab_rails['db_password'] = 'DB password'
```
For more information on GitLab HA setups, refer to [configuring GitLab for HA](../high_availability/gitlab.md).
For more information on GitLab multi-node setups, refer to the [reference architectures](../reference_architectures/index.md).
1. Reconfigure for the changes to take effect:
......
---
type: reference
---
# Working with the bundled PgBouncer service **(PREMIUM ONLY)**
[PgBouncer](http://www.pgbouncer.org/) is used to seamlessly migrate database
connections between servers in a failover scenario. Additionally, it can be used
in a non-fault-tolerant setup to pool connections, speeding up response time
while reducing resource usage.
GitLab Premium includes a bundled version of PgBouncer that can be managed
through `/etc/gitlab/gitlab.rb`.
## PgBouncer as part of a fault-tolerant GitLab installation
This content has been moved to a [new location](replication_and_failover.md#configuring-the-pgbouncer-node).
## PgBouncer as part of a non-fault-tolerant GitLab installation
1. Generate PGBOUNCER_USER_PASSWORD_HASH with the command `gitlab-ctl pg-password-md5 pgbouncer`
1. Generate SQL_USER_PASSWORD_HASH with the command `gitlab-ctl pg-password-md5 gitlab`. We'll also need to enter the plaintext SQL_USER_PASSWORD later
1. On your database node, ensure the following is set in your `/etc/gitlab/gitlab.rb`
```ruby
postgresql['pgbouncer_user_password'] = 'PGBOUNCER_USER_PASSWORD_HASH'
postgresql['sql_user_password'] = 'SQL_USER_PASSWORD_HASH'
postgresql['listen_address'] = 'XX.XX.XX.Y' # Where XX.XX.XX.Y is the ip address on the node postgresql should listen on
postgresql['md5_auth_cidr_addresses'] = %w(AA.AA.AA.B/32) # Where AA.AA.AA.B is the IP address of the pgbouncer node
```
1. Run `gitlab-ctl reconfigure`
NOTE: **Note:**
If the database was already running, it will need to be restarted after reconfigure by running `gitlab-ctl restart postgresql`.
1. On the node you are running PgBouncer on, make sure the following is set in `/etc/gitlab/gitlab.rb`
```ruby
pgbouncer['enable'] = true
pgbouncer['databases'] = {
gitlabhq_production: {
host: 'DATABASE_HOST',
user: 'pgbouncer',
password: 'PGBOUNCER_USER_PASSWORD_HASH'
}
}
```
1. Run `gitlab-ctl reconfigure`
1. On the node running Puma, make sure the following is set in `/etc/gitlab/gitlab.rb`
```ruby
gitlab_rails['db_host'] = 'PGBOUNCER_HOST'
gitlab_rails['db_port'] = '6432'
gitlab_rails['db_password'] = 'SQL_USER_PASSWORD'
```
1. Run `gitlab-ctl reconfigure`
1. At this point, your instance should connect to the database through PgBouncer. If you are having issues, see the [Troubleshooting](#troubleshooting) section
## Enable Monitoring
> [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/3786) in GitLab 12.0.
If you enable Monitoring, it must be enabled on **all** PgBouncer servers.
1. Create/edit `/etc/gitlab/gitlab.rb` and add the following configuration:
```ruby
# Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Replace placeholders
# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
# with the addresses of the Consul server nodes
consul['configuration'] = {
retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z),
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '0.0.0.0:9100'
pgbouncer_exporter['listen_address'] = '0.0.0.0:9188'
```
1. Run `sudo gitlab-ctl reconfigure` to compile the configuration.
## Administrative console
As part of Omnibus GitLab, a command is provided to automatically connect to the
PgBouncer administrative console. See the
[PgBouncer documentation](https://www.pgbouncer.org/usage.html#admin-console)
for detailed instructions on how to interact with the console.
To start a session run the following and provide the password for the `pgbouncer`
user:
```shell
sudo gitlab-ctl pgb-console
```
To get some basic information about the instance:
```shell
pgbouncer=# show databases; show clients; show servers;
name | host | port | database | force_user | pool_size | reserve_pool | pool_mode | max_connections | current_connections
---------------------+-----------+------+---------------------+------------+-----------+--------------+-----------+-----------------+---------------------
gitlabhq_production | 127.0.0.1 | 5432 | gitlabhq_production | | 100 | 5 | | 0 | 1
pgbouncer | | 6432 | pgbouncer | pgbouncer | 2 | 0 | statement | 0 | 0
(2 rows)
type | user | database | state | addr | port | local_addr | local_port | connect_time | request_time | ptr | link
| remote_pid | tls
------+-----------+---------------------+--------+-----------+-------+------------+------------+---------------------+---------------------+-----------+------
+------------+-----
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44590 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x12444c0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44592 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x12447c0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44594 | 127.0.0.1 | 6432 | 2018-04-24 22:13:10 | 2018-04-24 22:17:10 | 0x1244940 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44706 | 127.0.0.1 | 6432 | 2018-04-24 22:14:22 | 2018-04-24 22:16:31 | 0x1244ac0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44708 | 127.0.0.1 | 6432 | 2018-04-24 22:14:22 | 2018-04-24 22:15:15 | 0x1244c40 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44794 | 127.0.0.1 | 6432 | 2018-04-24 22:15:15 | 2018-04-24 22:15:15 | 0x1244dc0 |
| 0 |
C | gitlab | gitlabhq_production | active | 127.0.0.1 | 44798 | 127.0.0.1 | 6432 | 2018-04-24 22:15:15 | 2018-04-24 22:16:31 | 0x1244f40 |
| 0 |
C | pgbouncer | pgbouncer | active | 127.0.0.1 | 44660 | 127.0.0.1 | 6432 | 2018-04-24 22:13:51 | 2018-04-24 22:17:12 | 0x1244640 |
| 0 |
(8 rows)
type | user | database | state | addr | port | local_addr | local_port | connect_time | request_time | ptr | link | rem
ote_pid | tls
------+--------+---------------------+-------+-----------+------+------------+------------+---------------------+---------------------+-----------+------+----
--------+-----
S | gitlab | gitlabhq_production | idle | 127.0.0.1 | 5432 | 127.0.0.1 | 35646 | 2018-04-24 22:15:15 | 2018-04-24 22:17:10 | 0x124dca0 | |
19980 |
(1 row)
```
## Troubleshooting
In case you are experiencing any issues connecting through PgBouncer, the first
place to check is always the logs:
```shell
sudo gitlab-ctl tail pgbouncer
```
Additionally, you can check the output from `show databases` in the
[administrative console](#administrative-console). In the output, you would expect
to see values in the `host` field for the `gitlabhq_production` database.
Additionally, `current_connections` should be greater than 1.
### Message: `LOG: invalid CIDR mask in address`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-cidr-mask-in-address).
### Message: `LOG: invalid IP mask "md5": Name or service not known`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-ip-mask-md5-name-or-service-not-known).
......@@ -203,7 +203,7 @@ When installing the GitLab package, do not supply `EXTERNAL_URL` value.
### Configuring the Database nodes
1. Make sure to [configure the Consul nodes](../high_availability/consul.md).
1. Make sure to [configure the Consul nodes](../consul.md).
1. Make sure you collect [`CONSUL_SERVER_NODES`](#consul-information), [`PGBOUNCER_PASSWORD_HASH`](#pgbouncer-information), [`POSTGRESQL_PASSWORD_HASH`](#postgresql-information), the [number of db nodes](#postgresql-information), and the [network address](#network-information) before executing the next step.
1. On the master database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
......@@ -466,7 +466,7 @@ Check the [Troubleshooting section](#troubleshooting) before proceeding.
gitlab-ctl write-pgpass --host 127.0.0.1 --database pgbouncer --user pgbouncer --hostuser gitlab-consul
```
1. [Enable monitoring](../high_availability/pgbouncer.md#enable-monitoring)
1. [Enable monitoring](../postgresql/pgbouncer.md#enable-monitoring)
#### PgBouncer Checkpoint
......@@ -795,7 +795,7 @@ After deploying the configuration follow these steps:
This example uses 3 PostgreSQL servers, and 1 application node (with PgBouncer setup alongside).
It differs from the [recommended setup](#example-recommended-setup) by moving the Consul servers into the same servers we use for PostgreSQL.
The trade-off is between reducing server counts, against the increased operational complexity of needing to deal with PostgreSQL [failover](#failover-procedure) and [restore](#restore-procedure) procedures in addition to [Consul outage recovery](../high_availability/consul.md#outage-recovery) on the same set of machines.
The trade-off is between reducing server counts, against the increased operational complexity of needing to deal with PostgreSQL [failover](#failover-procedure) and [restore](#restore-procedure) procedures in addition to [Consul outage recovery](../consul.md#outage-recovery) on the same set of machines.
In this example we start with all servers on the same 10.6.0.0/16 private network range, they can connect to each freely other on those addresses.
......@@ -1087,7 +1087,7 @@ To restart either service, run `gitlab-ctl restart SERVICE`
For PostgreSQL, it is usually safe to restart the master node by default. Automatic failover defaults to a 1 minute timeout. Provided the database returns before then, nothing else needs to be done. To be safe, you can stop `repmgrd` on the standby nodes first with `gitlab-ctl stop repmgrd`, then start afterwards with `gitlab-ctl start repmgrd`.
On the Consul server nodes, it is important to restart the Consul service in a controlled fashion. Read our [Consul documentation](../high_availability/consul.md#restarting-the-server-cluster) for instructions on how to restart the service.
On the Consul server nodes, it is important to [restart the Consul service](../consul.md#restart-consul) in a controlled manner.
### `gitlab-ctl repmgr-check-master` command produces errors
......@@ -1134,11 +1134,10 @@ postgresql['trust_auth_cidr_addresses'] = %w(123.123.123.123/32 <other_cidrs>)
### Issues with other components
If you're running into an issue with a component not outlined here, be sure to check the troubleshooting section of their specific documentation page.
If you're running into an issue with a component not outlined here, be sure to check the troubleshooting section of their specific documentation page:
- [Consul](../high_availability/consul.md#troubleshooting)
- [Consul](../consul.md#troubleshooting-consul)
- [PostgreSQL](https://docs.gitlab.com/omnibus/settings/database.html#troubleshooting)
- [GitLab application](../high_availability/gitlab.md#troubleshooting)
## Patroni
......
......@@ -466,7 +466,7 @@ While it doesn't require a list of all Sentinel nodes, in case of a failure,
it needs to access at least one of the listed.
NOTE: **Note:**
The following steps should be performed in the [GitLab application server](../high_availability/gitlab.md)
The following steps should be performed in the GitLab application server
which ideally should not have Redis or Sentinels on it for a HA setup.
1. SSH into the server where the GitLab application is installed.
......@@ -736,6 +736,5 @@ Read more:
1. [Reference architectures](../reference_architectures/index.md)
1. [Configure the database](../postgresql/replication_and_failover.md)
1. [Configure NFS](../high_availability/nfs.md)
1. [Configure the GitLab application servers](../high_availability/gitlab.md)
1. [Configure the load balancers](../high_availability/load_balancer.md)
1. [Configure NFS](../nfs.md)
1. [Configure the load balancers](../load_balancer.md)
......@@ -221,7 +221,7 @@ the correct credentials for the Sentinel nodes.
While it doesn't require a list of all Sentinel nodes, in case of a failure,
it needs to access at least one of listed ones.
The following steps should be performed in the [GitLab application server](../high_availability/gitlab.md)
The following steps should be performed in the GitLab application server
which ideally should not have Redis or Sentinels in the same machine:
1. Edit `/home/git/gitlab/config/resque.yml` following the example in
......
......@@ -557,7 +557,7 @@ On each node perform the following:
1. Specify the necessary NFS mounts in `/etc/fstab`.
The exact contents of `/etc/fstab` will depend on how you chose
to configure your NFS server. See the [NFS documentation](../high_availability/nfs.md)
to configure your NFS server. See the [NFS documentation](../nfs.md)
for examples and the various options.
1. Create the shared directories. These may be different depending on your NFS
......@@ -855,7 +855,7 @@ along with [Gitaly](#configure-gitaly), are recommended over using NFS whenever
possible. However, if you intend to use GitLab Pages,
[you must use NFS](troubleshooting.md#gitlab-pages-requires-nfs).
For information about configuring NFS, see the [NFS documentation page](../high_availability/nfs.md).
For information about configuring NFS, see the [NFS documentation page](../nfs.md).
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
......
......@@ -1445,7 +1445,7 @@ On each node perform the following:
1. Specify the necessary NFS mounts in `/etc/fstab`.
The exact contents of `/etc/fstab` will depend on how you chose
to configure your NFS server. See the [NFS documentation](../high_availability/nfs.md)
to configure your NFS server. See the [NFS documentation](../nfs.md)
for examples and the various options.
1. Create the shared directories. These may be different depending on your NFS
......@@ -1754,7 +1754,7 @@ work.
are recommended over NFS wherever possible for improved performance. If you intend
to use GitLab Pages, this currently [requires NFS](troubleshooting.md#gitlab-pages-requires-nfs).
See how to [configure NFS](../high_availability/nfs.md).
See how to [configure NFS](../nfs.md).
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
......
......@@ -1444,7 +1444,7 @@ On each node perform the following:
1. Specify the necessary NFS mounts in `/etc/fstab`.
The exact contents of `/etc/fstab` will depend on how you chose
to configure your NFS server. See the [NFS documentation](../high_availability/nfs.md)
to configure your NFS server. See the [NFS documentation](../nfs.md)
for examples and the various options.
1. Create the shared directories. These may be different depending on your NFS
......@@ -1753,7 +1753,7 @@ work.
are recommended over NFS wherever possible for improved performance. If you intend
to use GitLab Pages, this currently [requires NFS](troubleshooting.md#gitlab-pages-requires-nfs).
See how to [configure NFS](../high_availability/nfs.md).
See how to [configure NFS](../nfs.md).
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
......
......@@ -177,7 +177,7 @@ column.
| NFS ([5](#footnotes)) ([7](#footnotes)) | Shared disk storage service. Can be used as an alternative Object Storage. Required for GitLab Pages | [NFS configuration](../high_availability/nfs.md) | No |
| [Consul](../../development/architecture.md#consul) ([3](#footnotes)) | Service discovery and health checks/failover | [Consul configuration](../high_availability/consul.md) **(PREMIUM ONLY)** | Yes |
| [PostgreSQL](../../development/architecture.md#postgresql) | Database | [PostgreSQL configuration](https://docs.gitlab.com/omnibus/settings/database.html) | Yes |
| [PgBouncer](../../development/architecture.md#pgbouncer) | Database connection pooler | [PgBouncer configuration](../high_availability/pgbouncer.md#running-pgbouncer-as-part-of-a-non-ha-gitlab-installation) **(PREMIUM ONLY)** | Yes |
| [PgBouncer](../../development/architecture.md#pgbouncer) | Database connection pooler | [PgBouncer configuration](../postgresql/pgbouncer.md) **(PREMIUM ONLY)** | Yes |
| Repmgr | PostgreSQL cluster management and failover | [PostgreSQL and Repmgr configuration](../postgresql/replication_and_failover.md) | Yes |
| Patroni | An alternative PostgreSQL cluster management and failover | [PostgreSQL and Patroni configuration](../postgresql/replication_and_failover.md#patroni) | Yes |
| [Redis](../../development/architecture.md#redis) ([3](#footnotes)) | Key/value store for fast data lookup and caching | [Redis configuration](../high_availability/redis.md) | Yes |
......
......@@ -17,7 +17,7 @@ with the Fog library that GitLab uses. Symptoms include:
### GitLab Pages requires NFS
If you intend to use [GitLab Pages](../../user/project/pages/index.md), this currently requires
[NFS](../high_availability/nfs.md). There is [work in progress](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/196)
[NFS](../nfs.md). There is [work in progress](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/196)
to remove this dependency. In the future, GitLab Pages may use
[object storage](https://gitlab.com/gitlab-org/gitlab/-/issues/208135).
......@@ -524,7 +524,7 @@ To restart either service, run `gitlab-ctl restart SERVICE`
For PostgreSQL, it is usually safe to restart the master node by default. Automatic failover defaults to a one-minute timeout; provided the database returns before then, nothing else needs to be done. To be safe, you can stop `repmgrd` on the standby nodes first with `gitlab-ctl stop repmgrd`, then start it again afterwards with `gitlab-ctl start repmgrd`.
On the Consul server nodes, it is important to restart the Consul service in a controlled fashion. Read our [Consul documentation](../high_availability/consul.md#restarting-the-server-cluster) for instructions on how to restart the service.
On the Consul server nodes, it is important to restart the Consul service in a controlled fashion. Read our [Consul documentation](../consul.md#restart-consul) for instructions on how to restart the service.
### `gitlab-ctl repmgr-check-master` command produces errors
......
......@@ -68,7 +68,7 @@ the example below, we add two more mount points that are named `nfs_1` and `nfs_
respectively.
NOTE: **Note:**
This example uses NFS. We do not recommend using EFS for storage as it may impact GitLab's performance. See the [relevant documentation](high_availability/nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
This example uses NFS. We do not recommend using EFS for storage as it may impact GitLab's performance. See the [relevant documentation](nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
**For installations from source**
......
---
type: reference
---
# Configuring Sidekiq
This section discusses how to configure an external Sidekiq instance using the Sidekiq
bundled in the GitLab package.
Sidekiq requires a connection to the Redis, PostgreSQL, and Gitaly instances.
To configure the Sidekiq node:
1. SSH into the Sidekiq server.
1. [Download/install](https://about.gitlab.com/install/) the Omnibus GitLab package
you want using steps 1 and 2 from the GitLab downloads page.
**Do not complete any other steps on the download page.**
1. Open `/etc/gitlab/gitlab.rb` with your editor.
1. Generate the Sidekiq configuration:
```ruby
sidekiq['listen_address'] = "10.10.1.48"
## Optional: Enable extra Sidekiq processes
sidekiq_cluster['enable'] = true
sidekiq_cluster['enable'] = true
"elastic_indexer"
]
```
1. Set up Sidekiq's connection to Redis:
```ruby
## Must be the same in every sentinel node
redis['master_name'] = 'gitlab-redis'
## The same password for Redis authentication you set up for the master node.
redis['master_password'] = 'YOUR_PASSWORD'
## A list of sentinels with `host` and `port`
gitlab_rails['redis_sentinels'] = [
{'host' => '10.10.1.34', 'port' => 26379},
{'host' => '10.10.1.35', 'port' => 26379},
{'host' => '10.10.1.36', 'port' => 26379},
]
```
1. Set up Sidekiq's connection to Gitaly:
```ruby
git_data_dirs({
'default' => { 'gitaly_address' => 'tcp://gitaly:8075' },
})
gitlab_rails['gitaly_token'] = 'YOUR_TOKEN'
```
1. Set up Sidekiq's connection to PostgreSQL:
```ruby
gitlab_rails['db_host'] = '10.10.1.30'
gitlab_rails['db_password'] = 'YOUR_PASSWORD'
gitlab_rails['db_port'] = '5432'
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_encoding'] = 'unicode'
gitlab_rails['auto_migrate'] = false
```
Remember to add the Sidekiq nodes to PostgreSQL's trusted addresses:
```ruby
postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.10.1.30/32 10.10.1.31/32 10.10.1.32/32 10.10.1.33/32 10.10.1.38/32)
```
1. Disable other services:
```ruby
nginx['enable'] = false
grafana['enable'] = false
prometheus['enable'] = false
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_monitor['enable'] = false
gitlab_workhorse['enable'] = false
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
puma['enable'] = false
gitlab_exporter['enable'] = false
```
1. Run `gitlab-ctl reconfigure`.
NOTE: **Note:**
You will need to restart the Sidekiq nodes after an update has occurred and database
migrations have been performed.
## Example configuration
Here's what the resulting `/etc/gitlab/gitlab.rb` would look like:
```ruby
########################################
##### Services Disabled ###
########################################
nginx['enable'] = false
grafana['enable'] = false
prometheus['enable'] = false
gitlab_rails['auto_migrate'] = false
alertmanager['enable'] = false
gitaly['enable'] = false
gitlab_monitor['enable'] = false
gitlab_workhorse['enable'] = false
postgres_exporter['enable'] = false
postgresql['enable'] = false
redis['enable'] = false
redis_exporter['enable'] = false
puma['enable'] = false
gitlab_exporter['enable'] = false
########################################
#### Redis ###
########################################
## Must be the same in every sentinel node
redis['master_name'] = 'gitlab-redis'
## The same password for Redis authentication you set up for the master node.
redis['master_password'] = 'YOUR_PASSWORD'
## A list of sentinels with `host` and `port`
gitlab_rails['redis_sentinels'] = [
{'host' => '10.10.1.34', 'port' => 26379},
{'host' => '10.10.1.35', 'port' => 26379},
{'host' => '10.10.1.36', 'port' => 26379},
]
#######################################
### Gitaly ###
#######################################
git_data_dirs({
'default' => { 'gitaly_address' => 'tcp://gitaly:8075' },
})
gitlab_rails['gitaly_token'] = 'YOUR_TOKEN'
#######################################
### Postgres ###
#######################################
gitlab_rails['db_host'] = '10.10.1.30'
gitlab_rails['db_password'] = 'YOUR_PASSWORD'
gitlab_rails['db_port'] = '5432'
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_encoding'] = 'unicode'
gitlab_rails['auto_migrate'] = false
#######################################
### Sidekiq configuration ###
#######################################
sidekiq['listen_address'] = "10.10.1.48"
#######################################
### Monitoring configuration ###
#######################################
consul['enable'] = true
consul['monitoring_service_discovery'] = true
consul['configuration'] = {
bind_addr: '10.10.1.48',
retry_join: %w(10.10.1.34 10.10.1.35 10.10.1.36)
}
# Set the network addresses that the exporters will listen on
node_exporter['listen_address'] = '10.10.1.48:9100'
# Rails Status for prometheus
gitlab_rails['monitoring_whitelist'] = ['10.10.1.42', '127.0.0.1']
```
## Further reading
Related Sidekiq configuration:
1. [Extra Sidekiq processes](operations/extra_sidekiq_processes.md)
1. [Using the GitLab-Sidekiq chart](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/)
......@@ -4475,11 +4475,6 @@ type EpicIssue implements Noteable {
"""
state: IssueState!
"""
Indicates whether an issue is published to the status page
"""
statusPagePublishedIncident: Boolean
"""
Indicates the currently logged in user is subscribed to the issue
"""
......@@ -6113,11 +6108,6 @@ type Issue implements Noteable {
"""
state: IssueState!
"""
Indicates whether an issue is published to the status page
"""
statusPagePublishedIncident: Boolean
"""
Indicates the currently logged in user is subscribed to the issue
"""
......
......@@ -12471,20 +12471,6 @@
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "statusPagePublishedIncident",
"description": "Indicates whether an issue is published to the status page",
"args": [
],
"type": {
"kind": "SCALAR",
"name": "Boolean",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "subscribed",
"description": "Indicates the currently logged in user is subscribed to the issue",
......@@ -16841,20 +16827,6 @@
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "statusPagePublishedIncident",
"description": "Indicates whether an issue is published to the status page",
"args": [
],
"type": {
"kind": "SCALAR",
"name": "Boolean",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "subscribed",
"description": "Indicates the currently logged in user is subscribed to the issue",
......@@ -750,7 +750,6 @@ Relationship between an epic and an issue
| `relationPath` | String | URI path of the epic-issue relation |
| `relativePosition` | Int | Relative position of the issue (used for positioning in epic tree and issue boards) |
| `state` | IssueState! | State of the issue |
| `statusPagePublishedIncident` | Boolean | Indicates whether an issue is published to the status page |
| `subscribed` | Boolean! | Indicates the currently logged in user is subscribed to the issue |
| `taskCompletionStatus` | TaskCompletionStatus! | Task completion status of the issue |
| `timeEstimate` | Int! | Time estimate of the issue |
......@@ -917,7 +916,6 @@ Represents a Group Member
| `reference` | String! | Internal reference of the issue. Returned in shortened format by default |
| `relativePosition` | Int | Relative position of the issue (used for positioning in epic tree and issue boards) |
| `state` | IssueState! | State of the issue |
| `statusPagePublishedIncident` | Boolean | Indicates whether an issue is published to the status page |
| `subscribed` | Boolean! | Indicates the currently logged in user is subscribed to the issue |
| `taskCompletionStatus` | TaskCompletionStatus! | Task completion status of the issue |
| `timeEstimate` | Int! | Time estimate of the issue |
......
......@@ -247,7 +247,7 @@ GitLab can be considered to have two layers from a process perspective:
- [Project page](https://github.com/hashicorp/consul/blob/master/README.md)
- Configuration:
- [Omnibus](../administration/high_availability/consul.md)
- [Omnibus](../administration/consul.md)
- [Charts](https://docs.gitlab.com/charts/installation/deployment.html#postgresql)
- Layer: Core Service (Data)
- GitLab.com: [Consul](../user/gitlab_com/index.md#consul)
......@@ -467,7 +467,7 @@ NGINX has an Ingress port for all HTTP requests and routes them to the appropria
- [Project page](https://github.com/pgbouncer/pgbouncer/blob/master/README.md)
- Configuration:
- [Omnibus](../administration/high_availability/pgbouncer.md)
- [Omnibus](../administration/postgresql/pgbouncer.md)
- [Charts](https://docs.gitlab.com/charts/installation/deployment.html#postgresql)
- Layer: Core Service (Data)
- GitLab.com: [Database Architecture](https://about.gitlab.com/handbook/engineering/infrastructure/production/architecture/#database-architecture)
......
......@@ -33,32 +33,28 @@ tempted to organize the `enum` as the following:
```ruby
# Define `failure_reason` enum in `Pipeline` model:
class Pipeline < ApplicationRecord
enum failure_reason: Enums::Pipeline.failure_reasons
enum failure_reason: ::PipelineEnums.failure_reasons
end
```
```ruby
# Define key/value pairs that used in FOSS and EE:
module Enums
module Pipeline
def self.failure_reasons
{ unknown_failure: 0, config_error: 1 }
end
module PipelineEnums
def self.failure_reasons
{ unknown_failure: 0, config_error: 1 }
end
end
Enums::Pipeline.prepend_if_ee('EE::Enums::Pipeline')
PipelineEnums.prepend_if_ee('EE::PipelineEnums')
```
```ruby
# Define key/value pairs that used in EE only:
module EE
module Enums
module Pipeline
override :failure_reasons
def failure_reasons
super.merge(activity_limit_exceeded: 2)
end
module PipelineEnums
override :failure_reasons
def failure_reasons
super.merge(activity_limit_exceeded: 2)
end
end
end
......@@ -67,7 +63,7 @@ end
This works as-is; however, it has a couple of downsides:
- Someone could define a key/value pair in EE that **conflicts** with a value defined in FOSS.
e.g. Define `activity_limit_exceeded: 1` in `EE::Enums::Pipeline`.
e.g. Define `activity_limit_exceeded: 1` in `EE::PipelineEnums`.
- When it happens, the feature behaves totally differently.
e.g. We cannot tell whether `failure_reason` means `config_error` or `activity_limit_exceeded`.
- When it happens, we have to ship a database migration to fix the data integrity,
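To make the first downside concrete, here is a small, purely illustrative Ruby sketch; it mirrors the docs example above and is not code from this commit:

```ruby
# Hypothetical illustration: FOSS defines config_error: 1, and EE
# accidentally reuses the integer 1 for a new failure reason.
foss_reasons = { unknown_failure: 0, config_error: 1 }
ee_reasons = foss_reasons.merge(activity_limit_exceeded: 1)

# Two different reasons now share the same database value, so a stored `1`
# can no longer be decoded unambiguously.
ee_reasons.select { |_reason, value| value == 1 }
# => {:config_error=>1, :activity_limit_exceeded=>1}
```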
......@@ -78,12 +74,10 @@ For example, this example sets `1000` as the offset:
```ruby
module EE
module Enums
module Pipeline
override :failure_reasons
def failure_reasons
super.merge(activity_limit_exceeded: 1_000, size_limit_exceeded: 1_001)
end
module PipelineEnums
override :failure_reasons
def failure_reasons
super.merge(activity_limit_exceeded: 1_000, size_limit_exceeded: 1_001)
end
end
end
......
......@@ -650,6 +650,10 @@ aren't in the message with ID `1 pipeline`.
## Adding a new language
NOTE: **Note:**
[Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/221012) in GitLab 13.3:
Languages with less than 2% of translations won't be available in the UI.
Let's suppose you want to add translations for a new language, say French.
1. The first step is to register the new language in `lib/gitlab/i18n.rb`:
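   As a rough sketch of that registration step, assuming the language map in `lib/gitlab/i18n.rb` is a hash of locale codes to native language names (the surrounding entries shown here are illustrative, not an exact excerpt):

   ```ruby
   # Illustrative excerpt only; the real file lists many more languages.
   module Gitlab
     module I18n
       AVAILABLE_LANGUAGES = {
         'en' => 'English',
         'es' => 'Español',
         'fr' => 'Français' # the newly registered language
       }.freeze
     end
   end
   ```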
......
......@@ -575,7 +575,7 @@ Let's create an EC2 instance where we'll install Gitaly:
1. Finally, acknowledge that you have access to the selected private key file or create a new one. Click **Launch Instances**.
NOTE: **Note:**
Instead of storing configuration _and_ repository data on the root volume, you can also choose to add an additional EBS volume for repository storage. Follow the same guidance as above. See the [Amazon EBS pricing](https://aws.amazon.com/ebs/pricing/). We do not recommend using EFS as it may negatively impact GitLab’s performance. You can review the [relevant documentation](../../administration/high_availability/nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
Instead of storing configuration _and_ repository data on the root volume, you can also choose to add an additional EBS volume for repository storage. Follow the same guidance as above. See the [Amazon EBS pricing](https://aws.amazon.com/ebs/pricing/). We do not recommend using EFS as it may negatively impact GitLab’s performance. You can review the [relevant documentation](../../administration/nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
Now that we have our EC2 instance ready, follow the [documentation to install GitLab and set up Gitaly on its own server](../../administration/gitaly/index.md#run-gitaly-on-its-own-server). Perform the client setup steps from that document on the [GitLab instance we created](#install-gitlab) above.
......
......@@ -91,7 +91,7 @@ Apart from a local hard drive you can also mount a volume that supports the netw
If you have enough RAM and a recent CPU the speed of GitLab is mainly limited by hard drive seek times. Having a fast drive (7200 RPM and up) or a solid state drive (SSD) will improve the responsiveness of GitLab.
NOTE: **Note:**
Since file system performance may affect GitLab's overall performance, [we don't recommend using AWS EFS for storage](../administration/high_availability/nfs.md#avoid-using-awss-elastic-file-system-efs).
Since file system performance may affect GitLab's overall performance, [we don't recommend using AWS EFS for storage](../administration/nfs.md#avoid-using-awss-elastic-file-system-efs).
### CPU
......
......@@ -516,7 +516,7 @@ directory that you want to copy the tarballs to is the root of your mounted
directory, just use `.` instead.
NOTE: **Note:**
Since file system performance may affect GitLab's overall performance, we do not recommend using EFS for storage. See the [relevant documentation](../administration/high_availability/nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
Since file system performance may affect GitLab's overall performance, we do not recommend using EFS for storage. See the [relevant documentation](../administration/nfs.md#avoid-using-awss-elastic-file-system-efs) for more details.
For Omnibus GitLab packages:
......@@ -717,7 +717,7 @@ sure these directories are empty before attempting a restore. Otherwise GitLab
will attempt to move these directories before restoring the new data and this
would cause an error.
Read more on [configuring NFS mounts](../administration/high_availability/nfs.md)
Read more on [configuring NFS mounts](../administration/nfs.md)
### Restore for installation from source
......
......@@ -374,6 +374,8 @@ You can supply a custom root certificate to complete TLS verification by using t
specifying a `ca` setting in a [`.bowerrc`](https://bower.io/docs/config/#bowerrc-specification)
file.
### Configuring Bundler projects
#### Using private Bundler registries
If you have a private Bundler registry you can use the
......@@ -394,6 +396,39 @@ specifying a [`BUNDLE_SSL_CA_CERT`](https://bundler.io/v2.0/man/bundle-config.1.
[environment variable](../../../ci/variables/README.md#custom-environment-variables)
in the job definition.
### Configuring Composer projects
#### Using private Composer registries
If you have a private Composer registry you can use the
[`repositories`](https://getcomposer.org/doc/05-repositories.md)
setting to specify its location.
For example:
```json
{
"repositories": [
{ "packagist.org": false },
{
"type": "composer",
"url": "https://composer.example.com"
}
],
"require": {
"monolog/monolog": "1.0.*"
}
}
```
#### Custom root certificates for Composer
You can supply a custom root certificate to complete TLS verification by using the
`ADDITIONAL_CA_CERT_BUNDLE` [environment variable](#available-variables), or by
specifying a [`COMPOSER_CAFILE`](https://getcomposer.org/doc/03-cli.md#composer-cafile)
[environment variable](../../../ci/variables/README.md#custom-environment-variables)
in the job definition.
### Configuring Conan projects
You can configure [Conan](https://conan.io/) projects by adding a `.conan` directory to your
......
......@@ -9,8 +9,49 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/6861) in [GitLab Premium](https://about.gitlab.com/pricing/) 11.6.
When you create a new [project](../project/index.md), creating it based on custom project templates is
a convenient option.
Custom project templates are useful for organizations that need to create many similar types of [projects](../project/index.md) and want to start from the same jumping-off point.
## Setting up Group-level Project Templates
To use a custom project template for a new project you need to:
1. [Create a 'templates' subgroup](subgroups/index.md).
1. [Add repositories (projects) to that new subgroup](index.md#add-projects-to-a-group) as your templates.
1. Edit your group's settings to look to your 'templates' subgroup for templates:
1. In the left-hand menu, click **{settings}** **Settings > General**.
NOTE: **Note:**
If you don't have access to the group's settings, you may not have sufficient privileges (for example, you may need developer or higher permissions).
1. Scroll to **Custom project templates** and click **Expand**. If no **Custom project templates** section displays, make sure you've created a subgroup, and added a project (repository) to it.
1. Select the 'templates' subgroup.
### Example structure
Here is a sample group/project structure for a hypothetical "Acme Co" for project templates:
```txt
# GitLab instance and group
gitlab.com/acmeco/
# Subgroups
internal
tools
# Subgroup for handling project templates
websites
templates
# Project templates
client-site-django
client-site-gatsby
client-site-html
# Other projects
client-site-a
client-site-b
client-site-c
...
```
### Adjust Settings
Users can configure a GitLab group that serves as the template
source under the group's **Settings > General > Custom project templates**.
......
<script>
import { GlAvatarLink, GlAvatarLabeled, GlLink } from '@gitlab/ui';
import { __ } from '~/locale';
export default {
milestone: 'milestone',
assignee: 'assignee',
labelMilestoneText: __('Milestone'),
labelAssigneeText: __('Assignee'),
components: {
GlLink,
GlAvatarLink,
GlAvatarLabeled,
},
props: {
boardListType: {
type: String,
required: true,
},
activeList: {
type: Object,
required: true,
},
},
computed: {
activeListAssignee() {
return this.activeList.assignee;
},
activeListMilestone() {
return this.activeList.milestone;
},
listTypeTitle() {
switch (this.boardListType) {
case this.$options.milestone: {
return this.$options.labelMilestoneText;
}
case this.$options.assignee: {
return this.$options.labelAssigneeText;
}
default: {
return '';
}
}
},
},
};
</script>
<template>
<div>
<label class="js-list-label gl-display-block">{{ listTypeTitle }}</label>
<gl-link
v-if="boardListType === $options.milestone"
class="js-milestone"
:href="activeListMilestone.webUrl"
>{{ activeListMilestone.title }}</gl-link
>
<gl-avatar-link
v-else-if="boardListType === $options.assignee"
class="js-assignee"
:href="activeListAssignee.webUrl"
>
<gl-avatar-labeled
:size="32"
:label="activeListAssignee.name"
:sub-label="`@${activeListAssignee.username}`"
:src="activeListAssignee.avatar"
/>
</gl-avatar-link>
</div>
</template>
<script>
import { GlIcon } from '@gitlab/ui';
import { s__ } from '~/locale';
export default {
i18n: {
published: s__('IncidentManagement|Published to status page'),
},
components: {
GlIcon,
},
props: {
statusPagePublishedIncident: {
type: Boolean,
required: false,
default: null,
},
unPublished: {
type: String,
required: true,
},
},
};
</script>
<template>
<div data-testid="published-cell">
<gl-icon
v-if="statusPagePublishedIncident"
name="status_success"
class="gl-fill-green-500"
:aria-label="$options.i18n.published"
/>
<div v-else>{{ unPublished }}</div>
</div>
</template>
......@@ -14,10 +14,11 @@ module EE
return super unless ::Gitlab::Geo.secondary?
if @limited_actions_message
s_('Geo|You are on a secondary, <b>read-only</b> Geo node. You may be able to make a limited amount of changes or perform a limited amount of actions on this page.').html_safe
html_escape(s_('Geo|You are on a secondary, %{b_open}read-only%{b_close} Geo node. You may be able to make a limited amount of changes or perform a limited amount of actions on this page.')) %
{ b_open: '<b>'.html_safe, b_close: '</b>'.html_safe }
else
message = (s_('Geo|You are on a secondary, <b>read-only</b> Geo node. If you want to make changes, you must visit this page on the %{primary_node}.') %
{ primary_node: link_to('primary node', ::Gitlab::Geo.primary_node&.url || '#') }).html_safe
message = html_escape(s_('Geo|You are on a secondary, %{b_open}read-only%{b_close} Geo node. If you want to make changes, you must visit this page on the %{node_link_open}primary node%{node_link_close}.')) %
{ node_link_open: "<a href=\"#{::Gitlab::Geo.primary_node&.url || '#'}\">".html_safe, node_link_close: "</a>".html_safe, b_open: '<b>'.html_safe, b_close: '</b>'.html_safe }
return "#{message} #{lag_message}".html_safe if lag_message
......
# frozen_string_literal: true
module EE
module Projects
module IncidentsHelper
extend ::Gitlab::Utils::Override
override :incidents_data
def incidents_data(project)
super.merge(
incidents_data_published_available(project)
)
end
private
def incidents_data_published_available(project)
return {} unless project.feature_available?(:status_page)
{
'published-available' => 'true'
}
end
end
end
end
# frozen_string_literal: true
module EE
module Enums
module Ci
module Pipeline
extend ActiveSupport::Concern
class_methods do
extend ::Gitlab::Utils::Override
override :failure_reasons
def failure_reasons
super.merge(
activity_limit_exceeded: 20,
size_limit_exceeded: 21,
job_activity_limit_exceeded: 22
)
end
end
end
end
end
end
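For context, here is a minimal sketch of how a FOSS-side counterpart would typically pick up the EE module above, following the `prepend_if_ee` pattern from the development docs excerpt earlier in this diff; the FOSS module name and values are assumed for illustration, not taken from this commit:

```ruby
# Assumed FOSS counterpart (illustrative only).
module Enums
  module Ci
    module Pipeline
      def self.failure_reasons
        { unknown_failure: 0, config_error: 1 }
      end
    end
  end
end

# The EE class_methods block above then overrides failure_reasons.
Enums::Ci::Pipeline.prepend_if_ee('EE::Enums::Ci::Pipeline')
```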
# frozen_string_literal: true
module EE
module Enums
module UserCallout
extend ActiveSupport::Concern
class_methods do
extend ::Gitlab::Utils::Override
# If you are going to add new items to this hash, check that you're not going
# to conflict with FOSS-only values: https://gitlab.com/gitlab-org/gitlab/blob/master/app/models/concerns/enums/user_callout.rb
override :feature_names
def feature_names
super.merge(
gold_trial: 4,
geo_enable_hashed_storage: 5,
geo_migrate_hashed_storage: 6,
canary_deployment: 7,
gold_trial_billings: 8,
threat_monitoring_info: 11,
account_recovery_regular_check: 12,
users_over_license_banner: 16,
standalone_vulnerabilities_introduction_banner: 17,
active_user_count_threshold: 18,
buy_pipeline_minutes_notification_dot: 19
)
end
end
end
end
end
# frozen_string_literal: true
module EE
module Enums
module InternalId
extend ActiveSupport::Concern
class_methods do
extend ::Gitlab::Utils::Override
override :usage_resources
def usage_resources
super.merge(requirements: 1000)
end
end
end
end
end
......
# frozen_string_literal: true
module EE
module InternalIdEnums
extend ActiveSupport::Concern
class_methods do
extend ::Gitlab::Utils::Override
override :usage_resources
def usage_resources
super.merge(requirements: 1000)
end
end
end
end