nexedi / gitlab-ce / Commits

Commit 6f7881ee authored Mar 24, 2020 by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 8c8bf44f

Showing 43 changed files with 1438 additions and 70 deletions (+1438 -70)
.rubocop.yml                                                                +14 -1
GITALY_SERVER_VERSION                                                       +1 -1
app/assets/stylesheets/notify.scss                                          +5 -1
app/models/clusters/applications/ingress.rb                                 +3 -3
app/models/clusters/applications/prometheus.rb                              +10 -2
app/models/project.rb                                                       +6 -0
app/models/user.rb                                                          +1 -0
app/models/user_canonical_email.rb                                          +8 -0
app/services/clusters/applications/check_upgrade_progress_service.rb        +71 -0
app/services/clusters/applications/prometheus_config_service.rb             +155 -0
app/services/clusters/applications/prometheus_update_service.rb             +35 -0
app/services/clusters/applications/schedule_update_service.rb               +38 -0
app/services/users/build_service.rb                                         +2 -0
app/services/users/update_canonical_email_service.rb                        +56 -0
app/services/users/update_service.rb                                        +7 -0
app/workers/all_queues.yml                                                  +14 -0
app/workers/cluster_update_app_worker.rb                                    +50 -0
app/workers/cluster_wait_for_app_update_worker.rb                           +16 -0
changelogs/unreleased/fix_logic_for_ingress_can_uninstall.yml               +5 -0
changelogs/unreleased/pokstad1-praefect-docs-reconcile-subcmd.yml           +5 -0
db/migrate/20200214025454_add_canonical_emails.rb                           +26 -0
db/structure.sql                                                            +32 -0
doc/administration/gitaly/praefect.md                                       +23 -0
doc/user/project/merge_requests/test_coverage_visualization.md              +1 -1
spec/factories/user_canonical_emails.rb                                     +8 -0
spec/frontend/notes/components/notes_app_spec.js                            +0 -0
spec/frontend/u2f/authenticate_spec.js                                      +33 -26
spec/frontend/u2f/mock_u2f_device.js                                        +0 -0
spec/frontend/u2f/register_spec.js                                          +26 -22
spec/models/clusters/applications/ingress_spec.rb                           +46 -12
spec/models/clusters/applications/prometheus_spec.rb                        +45 -0
spec/models/project_spec.rb                                                 +18 -0
spec/models/user_canonical_email_spec.rb                                    +20 -0
spec/services/clusters/applications/check_upgrade_progress_service_spec.rb  +94 -0
spec/services/clusters/applications/prometheus_config_service_spec.rb       +158 -0
spec/services/clusters/applications/prometheus_update_service_spec.rb       +92 -0
spec/services/clusters/applications/schedule_update_service_spec.rb         +37 -0
spec/services/users/build_service_spec.rb                                   +8 -0
spec/services/users/create_service_spec.rb                                  +2 -1
spec/services/users/update_canonical_email_service_spec.rb                  +116 -0
spec/services/users/update_service_spec.rb                                  +26 -0
spec/workers/cluster_update_app_worker_spec.rb                              +98 -0
spec/workers/cluster_wait_for_app_update_worker_spec.rb                     +27 -0
.rubocop.yml  (+14 -1)

@@ -331,8 +331,21 @@ RSpec/LeakyConstantDeclaration:
  Enabled: true
  Exclude:
    - 'spec/**/*.rb'
    - 'ee/spec/**/*.rb'
    - 'qa/spec/**/*.rb'
    - 'ee/spec/lib/gitlab/geo/log_cursor/logger_spec.rb'
    - 'ee/spec/lib/gitlab/geo/log_helpers_spec.rb'
    - 'ee/spec/lib/gitlab/geo/replicator_spec.rb'
    - 'ee/spec/mailers/emails/service_desk_spec.rb'
    - 'ee/spec/migrations/remove_creations_in_gitlab_subscription_histories_spec.rb'
    - 'ee/spec/migrations/set_resolved_state_on_vulnerabilities_spec.rb'
    - 'ee/spec/models/repository_spec.rb'
    - 'ee/spec/presenters/security/vulnerable_project_presenter_spec.rb'
    - 'ee/spec/serializers/vulnerable_project_entity_spec.rb'
    - 'ee/spec/services/clusters/applications/check_upgrade_progress_service_spec.rb'
    - 'ee/spec/services/dashboard/projects/list_service_spec.rb'
    - 'ee/spec/services/metrics/dashboard/clone_dashboard_service_spec.rb'
    - 'ee/spec/support/shared_contexts/epic_aggregate_constants.rb'
    - 'ee/spec/workers/elastic_namespace_rollout_worker_spec.rb'

RSpec/EmptyLineAfterHook:
  Enabled: false
GITALY_SERVER_VERSION  (+1 -1)

-12.9.0-rc1
+12.9.0-rc5
app/assets/stylesheets/notify.scss  (+5 -1)

@@ -20,7 +20,11 @@ pre.commit-message {
 }

 .gl-label-scoped {
-  box-shadow: 0 0 0 2px currentColor inset;
+  border: 2px solid currentColor;
+  box-sizing: border-box;
+  display: inline-block;
+  height: 17px;
+  line-height: 14px;
 }

 .gl-label-text {
app/models/clusters/applications/ingress.rb  (+3 -3)

@@ -50,7 +50,7 @@ module Clusters
   end

   def allowed_to_uninstall?
-    external_ip_or_hostname? && application_jupyter_nil_or_installable?
+    external_ip_or_hostname? && !application_jupyter_installed?
   end

   def install_command

@@ -161,8 +161,8 @@ module Clusters
     YAML.load_file(chart_values_file).deep_merge!(specification)
   end

-  def application_jupyter_nil_or_installable?
-    cluster.application_jupyter.nil? || cluster.application_jupyter&.installable?
+  def application_jupyter_installed?
+    cluster.application_jupyter&.installed?
   end

   def modsecurity_snippet_content
app/models/clusters/applications/prometheus.rb  (+10 -2)

@@ -35,6 +35,16 @@ module Clusters
         .perform_async(application.cluster_id, ::PrometheusService.to_param) # rubocop:disable CodeReuse/ServiceClass
       end
     end
+
+    after_transition any => :updating do |application|
+      application.update(last_update_started_at: Time.now)
+    end
   end

+  def updated_since?(timestamp)
+    last_update_started_at &&
+      last_update_started_at > timestamp &&
+      !update_errored?
+  end
+
   def chart

@@ -148,5 +158,3 @@ module Clusters
     end
   end
 end

 Clusters::Applications::Prometheus.prepend_if_ee('EE::Clusters::Applications::Prometheus')
app/models/project.rb  (+6 -0)

@@ -2411,6 +2411,12 @@ class Project < ApplicationRecord
     branch_protection.fully_protected? || branch_protection.developer_can_merge?
   end

+  def environments_for_scope(scope)
+    quoted_scope = ::Gitlab::SQL::Glob.q(scope)
+
+    environments.where("name LIKE (#{::Gitlab::SQL::Glob.to_like(quoted_scope)})") # rubocop:disable GitlabSecurity/SqlInjection
+  end
+
   private

   def closest_namespace_setting(name)
app/models/user.rb  (+1 -0)

@@ -168,6 +168,7 @@ class User < ApplicationRecord
   has_one :user_preference
   has_one :user_detail
   has_one :user_highest_role
+  has_one :user_canonical_email

   #
   # Validations
app/models/user_canonical_email.rb  (new file, +8 -0)

# frozen_string_literal: true

class UserCanonicalEmail < ApplicationRecord
  validates :canonical_email, presence: true
  validates :canonical_email, format: { with: Devise.email_regexp }

  belongs_to :user, inverse_of: :user_canonical_email
end
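The two validations do all the gatekeeping for this model. A minimal sketch of how they behave, assuming a persisted `User` record named `user` (the sketch is illustrative and not part of the commit):

```ruby
# Illustrative only; assumes `user` is a persisted User record.
record = UserCanonicalEmail.new(user: user, canonical_email: 'user@gmail.com')
record.valid? # => true

record.canonical_email = 'not-an-email'
record.valid? # => false -- fails the Devise.email_regexp format validation
```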
app/services/clusters/applications/check_upgrade_progress_service.rb  (new file, +71 -0)

# frozen_string_literal: true

module Clusters
  module Applications
    class CheckUpgradeProgressService < BaseHelmService
      def execute
        return unless app.updating?

        case phase
        when ::Gitlab::Kubernetes::Pod::SUCCEEDED
          on_success
        when ::Gitlab::Kubernetes::Pod::FAILED
          on_failed
        else
          check_timeout
        end
      rescue ::Kubeclient::HttpError => e
        app.make_update_errored!("Kubernetes error: #{e.message}") unless app.update_errored?
      end

      private

      def on_success
        app.make_installed!
      ensure
        remove_pod
      end

      def on_failed
        app.make_update_errored!(errors || 'Update silently failed')
      ensure
        remove_pod
      end

      def check_timeout
        if timed_out?
          begin
            app.make_update_errored!('Update timed out')
          ensure
            remove_pod
          end
        else
          ::ClusterWaitForAppUpdateWorker.perform_in(
            ::ClusterWaitForAppUpdateWorker::INTERVAL, app.name, app.id)
        end
      end

      def timed_out?
        Time.now.utc - app.updated_at.to_time.utc > ::ClusterWaitForAppUpdateWorker::TIMEOUT
      end

      def remove_pod
        helm_api.delete_pod!(pod_name)
      rescue
        # no-op
      end

      def phase
        helm_api.status(pod_name)
      end

      def errors
        helm_api.log(pod_name)
      end

      def pod_name
        @pod_name ||= patch_command.pod_name
      end
    end
  end
end
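In short, the service maps the upgrade pod's phase to a terminal state or a re-poll. A sketch of the decision table (not part of the commit), assuming `app` is already in the `updating` state:

```ruby
# Sketch of the phase handling above:
#   SUCCEEDED -> app.make_installed!            (pod removed afterwards)
#   FAILED    -> app.make_update_errored!(log)  (pod removed afterwards)
#   other     -> re-poll via ClusterWaitForAppUpdateWorker every INTERVAL,
#                until TIMEOUT elapses and the update is marked errored
Clusters::Applications::CheckUpgradeProgressService.new(app).execute
```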
app/services/clusters/applications/prometheus_config_service.rb  (new file, +155 -0)

# frozen_string_literal: true

module Clusters
  module Applications
    class PrometheusConfigService
      def initialize(project, cluster, app)
        @project = project
        @cluster = cluster
        @app = app
      end

      def execute(config = {})
        if has_alerts?
          generate_alert_manager(config)
        else
          reset_alert_manager(config)
        end
      end

      private

      attr_reader :project, :cluster, :app

      def reset_alert_manager(config)
        config = set_alert_manager_enabled(config, false)
        config.delete('alertmanagerFiles')
        config['serverFiles'] ||= {}
        config['serverFiles']['alerts'] = {}

        config
      end

      def generate_alert_manager(config)
        config = set_alert_manager_enabled(config, true)
        config = set_alert_manager_files(config)

        set_alert_manager_groups(config)
      end

      def set_alert_manager_enabled(config, enabled)
        config['alertmanager'] ||= {}
        config['alertmanager']['enabled'] = enabled

        config
      end

      def set_alert_manager_files(config)
        config['alertmanagerFiles'] = {
          'alertmanager.yml' => {
            'receivers' => alert_manager_receivers_params,
            'route' => alert_manager_route_params
          }
        }

        config
      end

      def set_alert_manager_groups(config)
        config['serverFiles'] ||= {}
        config['serverFiles']['alerts'] ||= {}
        config['serverFiles']['alerts']['groups'] ||= []

        environments_with_alerts.each do |env_name, alerts|
          index = config['serverFiles']['alerts']['groups'].find_index do |group|
            group['name'] == env_name
          end

          if index
            config['serverFiles']['alerts']['groups'][index]['rules'] = alerts
          else
            config['serverFiles']['alerts']['groups'] << {
              'name' => env_name,
              'rules' => alerts
            }
          end
        end

        config
      end

      def alert_manager_receivers_params
        [
          {
            'name' => 'gitlab',
            'webhook_configs' => [
              {
                'url' => notify_url,
                'send_resolved' => true,
                'http_config' => {
                  'bearer_token' => alert_manager_token
                }
              }
            ]
          }
        ]
      end

      def alert_manager_token
        app.generate_alert_manager_token!

        app.alert_manager_token
      end

      def alert_manager_route_params
        {
          'receiver' => 'gitlab',
          'group_wait' => '30s',
          'group_interval' => '5m',
          'repeat_interval' => '4h'
        }
      end

      def notify_url
        ::Gitlab::Routing.url_helpers
          .notify_project_prometheus_alerts_url(project, format: :json)
      end

      def has_alerts?
        environments_with_alerts.values.flatten(1).any?
      end

      def environments_with_alerts
        @environments_with_alerts ||=
          environments.each_with_object({}) do |environment, hash|
            name = rule_name(environment)
            hash[name] = alerts(environment)
          end
      end

      def rule_name(environment)
        "#{environment.name}.rules"
      end

      def alerts(environment)
        variables = Gitlab::Prometheus::QueryVariables.call(environment)
        alerts = Projects::Prometheus::AlertsFinder
          .new(environment: environment)
          .execute

        alerts.map do |alert|
          substitute_query_variables(alert.to_param, variables)
        end
      end

      def substitute_query_variables(hash, variables)
        hash['expr'] %= variables

        hash
      end

      def environments
        project.environments_for_scope(cluster.environment_scope)
      end
    end
  end
end
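The service's output feeds straight into the Helm values patch. A sketch of the shape it returns when at least one alert exists for a matching environment (keys taken from the code above; the values are illustrative, not from the commit):

```ruby
Clusters::Applications::PrometheusConfigService.new(project, cluster, app).execute
# => {
#      'alertmanager'      => { 'enabled' => true },
#      'alertmanagerFiles' => {
#        'alertmanager.yml' => { 'receivers' => [...], 'route' => {...} }
#      },
#      'serverFiles' => {
#        'alerts' => { 'groups' => [{ 'name' => 'production.rules', 'rules' => [...] }] }
#      }
#    }
```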
app/services/clusters/applications/prometheus_update_service.rb  (new file, +35 -0)

# frozen_string_literal: true

module Clusters
  module Applications
    class PrometheusUpdateService < BaseHelmService
      attr_accessor :project

      def initialize(app, project)
        super(app)
        @project = project
      end

      def execute
        app.make_updating!

        helm_api.update(patch_command(values))

        ::ClusterWaitForAppUpdateWorker.perform_in(
          ::ClusterWaitForAppUpdateWorker::INTERVAL, app.name, app.id)
      rescue ::Kubeclient::HttpError => ke
        app.make_update_errored!("Kubernetes error: #{ke.message}")
      rescue StandardError => e
        app.make_update_errored!(e.message)
      end

      private

      def values
        PrometheusConfigService
          .new(project, cluster, app)
          .execute
          .to_yaml
      end
    end
  end
end
app/services/clusters/applications/schedule_update_service.rb  (new file, +38 -0)

# frozen_string_literal: true

module Clusters
  module Applications
    class ScheduleUpdateService
      BACKOFF_DELAY = 2.minutes

      attr_accessor :application, :project

      def initialize(application, project)
        @application = application
        @project = project
      end

      def execute
        return unless application

        if recently_scheduled?
          worker_class.perform_in(BACKOFF_DELAY, application.name, application.id, project.id, Time.now)
        else
          worker_class.perform_async(application.name, application.id, project.id, Time.now)
        end
      end

      private

      def worker_class
        ::ClusterUpdateAppWorker
      end

      def recently_scheduled?
        return false unless application.last_update_started_at

        application.last_update_started_at.utc >= Time.now.utc - BACKOFF_DELAY
      end
    end
  end
end
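A usage sketch of the backoff behaviour (not part of the commit): if the application started an update within the last `BACKOFF_DELAY` (2 minutes), the worker run is deferred; otherwise it is enqueued immediately.

```ruby
# Illustrative; assumes `application` and `project` are persisted records.
Clusters::Applications::ScheduleUpdateService.new(application, project).execute
# recently_scheduled?  => ClusterUpdateAppWorker.perform_in(2.minutes, name, id, project.id, Time.now)
# otherwise            => ClusterUpdateAppWorker.perform_async(name, id, project.id, Time.now)
```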
app/services/users/build_service.rb  (+2 -0)

@@ -30,6 +30,8 @@ module Users
       build_identity(user)

+      Users::UpdateCanonicalEmailService.new(user: user).execute
+
       user
     end
app/services/users/update_canonical_email_service.rb  (new file, +56 -0)

# frozen_string_literal: true

module Users
  class UpdateCanonicalEmailService
    extend ActiveSupport::Concern

    INCLUDED_DOMAINS_PATTERN = [/gmail.com/].freeze

    def initialize(user:)
      raise ArgumentError.new("Please provide a user") unless user&.is_a?(User)

      @user = user
    end

    def execute
      return unless user.email
      return unless user.email.match? Devise.email_regexp

      canonical_email = canonicalize_email

      unless canonical_email
        # the canonical email doesn't exist, probably because the domain doesn't match
        # destroy any UserCanonicalEmail record associated with this user
        user.user_canonical_email&.delete
        # nothing else to do here
        return
      end

      if user.user_canonical_email
        # update to the new value
        user.user_canonical_email.canonical_email = canonical_email
      else
        user.build_user_canonical_email(canonical_email: canonical_email)
      end
    end

    private

    attr_reader :user

    def canonicalize_email
      email = user.email

      portions = email.split('@')
      username = portions.shift
      rest = portions.join

      regex = Regexp.union(INCLUDED_DOMAINS_PATTERN)
      return unless regex.match?(rest)

      no_dots = username.tr('.', '')
      before_plus = no_dots.split('+')[0]

      "#{before_plus}@#{rest}"
    end
  end
end
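A worked example of the canonicalization (a sketch, not part of the commit): for a matched domain such as gmail.com, dots in the local part are stripped and anything after a `+` is dropped.

```ruby
# Illustrative; mirrors the dot/plus handling in canonicalize_email above.
user = User.new(email: 'u.s.er+whatever@gmail.com')
Users::UpdateCanonicalEmailService.new(user: user).execute
user.user_canonical_email.canonical_email # => "user@gmail.com"
```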
app/services/users/update_service.rb  (+7 -0)

@@ -21,6 +21,7 @@ module Users
       discard_read_only_attributes
       assign_attributes
       assign_identity
+      build_canonical_email

       if @user.save(validate: validate) && update_status
         notify_success(user_exists)

@@ -40,6 +41,12 @@ module Users
     private

+    def build_canonical_email
+      return unless @user.email_changed?
+
+      Users::UpdateCanonicalEmailService.new(user: @user).execute
+    end
+
     def update_status
       return true unless @status_params
app/workers/all_queues.yml  (+14 -0)

@@ -325,6 +325,13 @@
   :resource_boundary: :unknown
   :weight: 1
   :idempotent:
+- :name: gcp_cluster:cluster_update_app
+  :feature_category: :kubernetes_management
+  :has_external_dependencies:
+  :urgency: :low
+  :resource_boundary: :unknown
+  :weight: 1
+  :idempotent:
 - :name: gcp_cluster:cluster_upgrade_app
   :feature_category: :kubernetes_management
   :has_external_dependencies: true

@@ -339,6 +346,13 @@
   :resource_boundary: :cpu
   :weight: 1
   :idempotent:
+- :name: gcp_cluster:cluster_wait_for_app_update
+  :feature_category: :kubernetes_management
+  :has_external_dependencies:
+  :urgency: :low
+  :resource_boundary: :unknown
+  :weight: 1
+  :idempotent:
 - :name: gcp_cluster:cluster_wait_for_ingress_ip_address
   :feature_category: :kubernetes_management
   :has_external_dependencies: true
app/workers/cluster_update_app_worker.rb  (new file, +50 -0)

# frozen_string_literal: true

class ClusterUpdateAppWorker # rubocop:disable Scalability/IdempotentWorker
  UpdateAlreadyInProgressError = Class.new(StandardError)

  include ApplicationWorker
  include ClusterQueue
  include ClusterApplications
  include ExclusiveLeaseGuard

  sidekiq_options retry: 3, dead: false

  LEASE_TIMEOUT = 10.minutes.to_i

  def perform(app_name, app_id, project_id, scheduled_time)
    @app_id = app_id

    try_obtain_lease do
      execute(app_name, app_id, project_id, scheduled_time)
    end
  end

  private

  # rubocop: disable CodeReuse/ActiveRecord
  def execute(app_name, app_id, project_id, scheduled_time)
    project = Project.find_by(id: project_id)
    return unless project

    find_application(app_name, app_id) do |app|
      update_prometheus(app, scheduled_time, project)
    end
  end
  # rubocop: enable CodeReuse/ActiveRecord

  def update_prometheus(app, scheduled_time, project)
    return if app.updated_since?(scheduled_time)
    return if app.update_in_progress?

    Clusters::Applications::PrometheusUpdateService.new(app, project).execute
  end

  def lease_key
    @lease_key ||= "#{self.class.name.underscore}-#{@app_id}"
  end

  def lease_timeout
    LEASE_TIMEOUT
  end
end
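The lease key is derived per application, so concurrent updates of the same app are serialized while different apps can update in parallel. A sketch (not part of the commit):

```ruby
# Illustrative; `42` is a hypothetical application id.
ClusterUpdateAppWorker.new.perform('prometheus', 42, project.id, Time.now)
# lease_key => "cluster_update_app_worker-42"
```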
app/workers/cluster_wait_for_app_update_worker.rb  (new file, +16 -0)

# frozen_string_literal: true

class ClusterWaitForAppUpdateWorker # rubocop:disable Scalability/IdempotentWorker
  include ApplicationWorker
  include ClusterQueue
  include ClusterApplications

  INTERVAL = 10.seconds
  TIMEOUT = 20.minutes

  def perform(app_name, app_id)
    find_application(app_name, app_id) do |app|
      ::Clusters::Applications::CheckUpgradeProgressService.new(app).execute
    end
  end
end
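Taken together, the update flow chains these pieces; a sketch of the call chain (not part of the commit):

```ruby
# PrometheusUpdateService#execute
#   -> helm_api.update(patch_command(values))
#   -> ClusterWaitForAppUpdateWorker.perform_in(10.seconds, app.name, app.id)
#        -> CheckUpgradeProgressService#execute  # succeeded / failed / re-poll
```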
changelogs/unreleased/fix_logic_for_ingress_can_uninstall.yml  (new file, +5 -0)

---
title: Fix logic for ingress can_uninstall?
merge_request: 27729
author:
type: fixed
changelogs/unreleased/pokstad1-praefect-docs-reconcile-subcmd.yml  (new file, +5 -0)

---
title: Update Gitaly to 12.9.0-rc5
merge_request: 27631
author:
type: added
db/migrate/20200214025454_add_canonical_emails.rb  (new file, +26 -0)

# frozen_string_literal: true

class AddCanonicalEmails < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  def up
    with_lock_retries do
      create_table :user_canonical_emails do |t|
        t.timestamps_with_timezone
        t.references :user, index: false, null: false, foreign_key: { on_delete: :cascade }
        t.string :canonical_email, null: false, index: true # rubocop:disable Migration/AddLimitToStringColumns
      end
    end

    add_index :user_canonical_emails, [:user_id, :canonical_email], unique: true
    add_index :user_canonical_emails, :user_id, unique: true
  end

  def down
    with_lock_retries do
      drop_table(:user_canonical_emails)
    end
  end
end
db/structure.sql  (+32 -0)

@@ -6110,6 +6110,23 @@
ALTER SEQUENCE public.user_callouts_id_seq OWNED BY public.user_callouts.id;

CREATE TABLE public.user_canonical_emails (
    id bigint NOT NULL,
    created_at timestamp with time zone NOT NULL,
    updated_at timestamp with time zone NOT NULL,
    user_id bigint NOT NULL,
    canonical_email character varying NOT NULL
);

CREATE SEQUENCE public.user_canonical_emails_id_seq
    START WITH 1
    INCREMENT BY 1
    NO MINVALUE
    NO MAXVALUE
    CACHE 1;

ALTER SEQUENCE public.user_canonical_emails_id_seq OWNED BY public.user_canonical_emails.id;

CREATE TABLE public.user_custom_attributes (
    id integer NOT NULL,
    created_at timestamp without time zone NOT NULL,
...

@@ -7302,6 +7319,8 @@
ALTER TABLE ONLY public.user_callouts ALTER COLUMN id SET DEFAULT nextval('public.user_callouts_id_seq'::regclass);

ALTER TABLE ONLY public.user_canonical_emails ALTER COLUMN id SET DEFAULT nextval('public.user_canonical_emails_id_seq'::regclass);

ALTER TABLE ONLY public.user_custom_attributes ALTER COLUMN id SET DEFAULT nextval('public.user_custom_attributes_id_seq'::regclass);

ALTER TABLE ONLY public.user_details ALTER COLUMN user_id SET DEFAULT nextval('public.user_details_user_id_seq'::regclass);
...

@@ -8206,6 +8225,9 @@
ALTER TABLE ONLY public.user_callouts
    ADD CONSTRAINT user_callouts_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.user_canonical_emails
    ADD CONSTRAINT user_canonical_emails_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.user_custom_attributes
    ADD CONSTRAINT user_custom_attributes_pkey PRIMARY KEY (id);
...

@@ -9963,6 +9985,12 @@
CREATE UNIQUE INDEX index_user_callouts_on_user_id_and_feature_name ON public.user_callouts USING btree (user_id, feature_name);

CREATE INDEX index_user_canonical_emails_on_canonical_email ON public.user_canonical_emails USING btree (canonical_email);

CREATE UNIQUE INDEX index_user_canonical_emails_on_user_id ON public.user_canonical_emails USING btree (user_id);

CREATE UNIQUE INDEX index_user_canonical_emails_on_user_id_and_canonical_email ON public.user_canonical_emails USING btree (user_id, canonical_email);

CREATE INDEX index_user_custom_attributes_on_key_and_value ON public.user_custom_attributes USING btree (key, value);

CREATE UNIQUE INDEX index_user_custom_attributes_on_user_id_and_key ON public.user_custom_attributes USING btree (user_id, key);
...

@@ -11484,6 +11512,9 @@
ALTER TABLE ONLY public.project_feature_usages
    ADD CONSTRAINT fk_rails_c22a50024b FOREIGN KEY (project_id) REFERENCES public.projects(id) ON DELETE CASCADE;

ALTER TABLE ONLY public.user_canonical_emails
    ADD CONSTRAINT fk_rails_c2bd828b51 FOREIGN KEY (user_id) REFERENCES public.users(id) ON DELETE CASCADE;

ALTER TABLE ONLY public.project_repositories
    ADD CONSTRAINT fk_rails_c3258dc63b FOREIGN KEY (shard_id) REFERENCES public.shards(id) ON DELETE RESTRICT;
...

@@ -12614,6 +12645,7 @@ INSERT INTO "schema_migrations" (version) VALUES
('20200213204737'),
('20200213220159'),
('20200213220211'),
('20200214025454'),
('20200214034836'),
('20200214085940'),
('20200214214934'),
doc/administration/gitaly/praefect.md  (+23 -0)

@@ -682,6 +682,29 @@
...for example behind a load balancer, `failover_enabled` should be disabled. The reason
is no coordination that currently happens across different Praefect instances, so there could be a situation where
two Praefect instances think two different Gitaly nodes are the primary.

## Backend Node Recovery

When a Praefect backend node fails and is no longer able to replicate changes, the backend node will start to drift from the primary. If that node eventually recovers, it will need to be reconciled with the current primary. The primary node is considered the single source of truth for the state of a shard. The Praefect `reconcile` subcommand allows for the manual reconciliation between a backend node and the current primary.

Run the following command on the Praefect server after all placeholders (`<virtual-storage>` and `<target-storage>`) have been replaced:

```shell
sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml reconcile -virtual <virtual-storage> -target <target-storage>
```

- Replace the placeholder `<virtual-storage>` with the virtual storage containing the backend node storage to be checked.
- Replace the placeholder `<target-storage>` with the backend storage name.

The command will return a list of repositories that were found to be inconsistent against the current primary. Each of these inconsistencies will also be logged with an accompanying replication job ID.

## Grafana

Grafana is included with GitLab, and can be used to monitor your Praefect
doc/user/project/merge_requests/test_coverage_visualization.md  (+1 -1)

@@ -67,7 +67,7 @@
 This feature comes with the `:coverage_report_view` feature flag disabled by
 default. This feature is disabled due to some performance issues with very large
-data sets. When [the performance issue](https://gitlab.com/gitlab-org/gitlab/issues/37725)
+data sets. When [the performance issue](https://gitlab.com/gitlab-org/gitlab/issues/211410)
 is resolved, the feature will be enabled by default.

 To enable this feature, ask a GitLab administrator with Rails console access to
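The page truncates the console instruction; for illustration, GitLab feature flags are typically toggled from the Rails console like so (an assumption, since the exact snippet is cut off here):

```ruby
# Assumption: the truncated instruction likely ends with a console call of
# this shape. Run in the GitLab Rails console as an administrator.
Feature.enable(:coverage_report_view)
```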
spec/factories/user_canonical_emails.rb  (new file, +8 -0)

# frozen_string_literal: true

FactoryBot.define do
  factory :user_canonical_email do
    user
    canonical_email { user.email }
  end
end
spec/frontend/notes/components/note_app_spec.js → spec/frontend/notes/components/notes_app_spec.js  (file moved)
spec/javascripts/u2f/authenticate_spec.js → spec/frontend/u2f/authenticate_spec.js  (+33 -26)

@@ -3,15 +3,19 @@ import U2FAuthenticate from '~/u2f/authenticate';
 import 'vendor/u2f';
 import MockU2FDevice from './mock_u2f_device';

-describe('U2FAuthenticate', function() {
+describe('U2FAuthenticate', () => {
+  let u2fDevice;
+  let container;
+  let component;
+
   preloadFixtures('u2f/authenticate.html');

   beforeEach(() => {
     loadFixtures('u2f/authenticate.html');
-    this.u2fDevice = new MockU2FDevice();
-    this.container = $('#js-authenticate-u2f');
-    this.component = new U2FAuthenticate(
-      this.container,
+    u2fDevice = new MockU2FDevice();
+    container = $('#js-authenticate-u2f');
+    component = new U2FAuthenticate(
+      container,
       '#js-login-u2f-form',
       {
         sign_requests: [],

@@ -22,21 +26,23 @@
   });

   describe('with u2f unavailable', () => {
+    let oldu2f;
+
     beforeEach(() => {
-      spyOn(this.component, 'switchToFallbackUI');
-      this.oldu2f = window.u2f;
+      jest.spyOn(component, 'switchToFallbackUI').mockImplementation(() => {});
+      oldu2f = window.u2f;
       window.u2f = null;
     });

     afterEach(() => {
-      window.u2f = this.oldu2f;
+      window.u2f = oldu2f;
     });

     it('falls back to normal 2fa', done => {
-      this.component
+      component
         .start()
         .then(() => {
-          expect(this.component.switchToFallbackUI).toHaveBeenCalled();
+          expect(component.switchToFallbackUI).toHaveBeenCalled();
           done();
         })
         .catch(done.fail);

@@ -46,54 +52,55 @@
   describe('with u2f available', () => {
     beforeEach(done => {
       // bypass automatic form submission within renderAuthenticated
-      spyOn(this.component, 'renderAuthenticated').and.returnValue(true);
-      this.u2fDevice = new MockU2FDevice();
+      jest.spyOn(component, 'renderAuthenticated').mockReturnValue(true);
+      u2fDevice = new MockU2FDevice();

-      this.component
+      component
         .start()
         .then(done)
         .catch(done.fail);
     });

     it('allows authenticating via a U2F device', () => {
-      const inProgressMessage = this.container.find('p');
+      const inProgressMessage = container.find('p');

       expect(inProgressMessage.text()).toContain('Trying to communicate with your device');
-      this.u2fDevice.respondToAuthenticateRequest({
+      u2fDevice.respondToAuthenticateRequest({
         deviceData: 'this is data from the device',
       });

-      expect(this.component.renderAuthenticated).toHaveBeenCalledWith(
+      expect(component.renderAuthenticated).toHaveBeenCalledWith(
         '{"deviceData":"this is data from the device"}',
       );
     });

     describe('errors', () => {
       it('displays an error message', () => {
-        const setupButton = this.container.find('#js-login-u2f-device');
+        const setupButton = container.find('#js-login-u2f-device');
         setupButton.trigger('click');
-        this.u2fDevice.respondToAuthenticateRequest({
+        u2fDevice.respondToAuthenticateRequest({
           errorCode: 'error!',
         });
-        const errorMessage = this.container.find('p');
+        const errorMessage = container.find('p');

         expect(errorMessage.text()).toContain('There was a problem communicating with your device');
       });

-      return it('allows retrying authentication after an error', () => {
-        let setupButton = this.container.find('#js-login-u2f-device');
+      it('allows retrying authentication after an error', () => {
+        let setupButton = container.find('#js-login-u2f-device');
         setupButton.trigger('click');
-        this.u2fDevice.respondToAuthenticateRequest({
+        u2fDevice.respondToAuthenticateRequest({
           errorCode: 'error!',
         });
-        const retryButton = this.container.find('#js-u2f-try-again');
+        const retryButton = container.find('#js-u2f-try-again');
         retryButton.trigger('click');
-        setupButton = this.container.find('#js-login-u2f-device');
+        setupButton = container.find('#js-login-u2f-device');
         setupButton.trigger('click');
-        this.u2fDevice.respondToAuthenticateRequest({
+        u2fDevice.respondToAuthenticateRequest({
           deviceData: 'this is data from the device',
         });

-        expect(this.component.renderAuthenticated).toHaveBeenCalledWith(
+        expect(component.renderAuthenticated).toHaveBeenCalledWith(
           '{"deviceData":"this is data from the device"}',
         );
       });
spec/javascripts/u2f/mock_u2f_device.js → spec/frontend/u2f/mock_u2f_device.js  (file moved)
spec/javascripts/u2f/register_spec.js → spec/frontend/u2f/register_spec.js  (+26 -22)

@@ -3,33 +3,37 @@ import U2FRegister from '~/u2f/register';
 import 'vendor/u2f';
 import MockU2FDevice from './mock_u2f_device';

-describe('U2FRegister', function() {
+describe('U2FRegister', () => {
+  let u2fDevice;
+  let container;
+  let component;
+
   preloadFixtures('u2f/register.html');

   beforeEach(done => {
     loadFixtures('u2f/register.html');
-    this.u2fDevice = new MockU2FDevice();
-    this.container = $('#js-register-u2f');
-    this.component = new U2FRegister(this.container, $('#js-register-u2f-templates'), {}, 'token');
-    this.component
+    u2fDevice = new MockU2FDevice();
+    container = $('#js-register-u2f');
+    component = new U2FRegister(container, $('#js-register-u2f-templates'), {}, 'token');
+    component
       .start()
       .then(done)
       .catch(done.fail);
   });

   it('allows registering a U2F device', () => {
-    const setupButton = this.container.find('#js-setup-u2f-device');
+    const setupButton = container.find('#js-setup-u2f-device');

     expect(setupButton.text()).toBe('Set up new U2F device');
     setupButton.trigger('click');
-    const inProgressMessage = this.container.children('p');
+    const inProgressMessage = container.children('p');

     expect(inProgressMessage.text()).toContain('Trying to communicate with your device');
-    this.u2fDevice.respondToRegisterRequest({
+    u2fDevice.respondToRegisterRequest({
       deviceData: 'this is data from the device',
     });
-    const registeredMessage = this.container.find('p');
-    const deviceResponse = this.container.find('#js-device-response');
+    const registeredMessage = container.find('p');
+    const deviceResponse = container.find('#js-device-response');

     expect(registeredMessage.text()).toContain('Your device was successfully set up!');
     expect(deviceResponse.val()).toBe('{"deviceData":"this is data from the device"}');

@@ -37,41 +41,41 @@
   describe('errors', () => {
     it("doesn't allow the same device to be registered twice (for the same user", () => {
-      const setupButton = this.container.find('#js-setup-u2f-device');
+      const setupButton = container.find('#js-setup-u2f-device');
       setupButton.trigger('click');
-      this.u2fDevice.respondToRegisterRequest({
+      u2fDevice.respondToRegisterRequest({
         errorCode: 4,
       });
-      const errorMessage = this.container.find('p');
+      const errorMessage = container.find('p');

       expect(errorMessage.text()).toContain('already been registered with us');
     });

     it('displays an error message for other errors', () => {
-      const setupButton = this.container.find('#js-setup-u2f-device');
+      const setupButton = container.find('#js-setup-u2f-device');
       setupButton.trigger('click');
-      this.u2fDevice.respondToRegisterRequest({
+      u2fDevice.respondToRegisterRequest({
         errorCode: 'error!',
       });
-      const errorMessage = this.container.find('p');
+      const errorMessage = container.find('p');

       expect(errorMessage.text()).toContain('There was a problem communicating with your device');
     });

     it('allows retrying registration after an error', () => {
-      let setupButton = this.container.find('#js-setup-u2f-device');
+      let setupButton = container.find('#js-setup-u2f-device');
       setupButton.trigger('click');
-      this.u2fDevice.respondToRegisterRequest({
+      u2fDevice.respondToRegisterRequest({
         errorCode: 'error!',
       });
-      const retryButton = this.container.find('#U2FTryAgain');
+      const retryButton = container.find('#U2FTryAgain');
       retryButton.trigger('click');
-      setupButton = this.container.find('#js-setup-u2f-device');
+      setupButton = container.find('#js-setup-u2f-device');
       setupButton.trigger('click');
-      this.u2fDevice.respondToRegisterRequest({
+      u2fDevice.respondToRegisterRequest({
         deviceData: 'this is data from the device',
       });
-      const registeredMessage = this.container.find('p');
+      const registeredMessage = container.find('p');

       expect(registeredMessage.text()).toContain('Your device was successfully set up!');
     });
spec/models/clusters/applications/ingress_spec.rb  (+46 -12)

@@ -21,26 +21,60 @@ describe Clusters::Applications::Ingress do
The `#can_uninstall?` examples after the change:

  describe '#can_uninstall?' do
    subject { ingress.can_uninstall? }

    context 'with jupyter installed' do
      before do
        create(:clusters_applications_jupyter, :installed, cluster: ingress.cluster)
      end

      it 'returns false if external_ip_or_hostname? is true' do
        ingress.external_ip = 'IP'

        is_expected.to be_falsey
      end

      it 'returns false if external_ip_or_hostname? is false' do
        is_expected.to be_falsey
      end
    end

    context 'with jupyter installable' do
      before do
        create(:clusters_applications_jupyter, :installable, cluster: ingress.cluster)
      end

      it 'returns true if external_ip_or_hostname? is true' do
        ingress.external_ip = 'IP'

        is_expected.to be_truthy
      end

      it 'returns false if external_ip_or_hostname? is false' do
        is_expected.to be_falsey
      end
    end

    context 'with jupyter nil' do
      it 'returns false if external_ip_or_hostname? is false' do
        is_expected.to be_falsey
      end

      context 'if external_ip_or_hostname? is true' do
        context 'with IP' do
          before do
            ingress.external_ip = 'IP'
          end

          it { is_expected.to be_truthy }
        end

        context 'with hostname' do
          before do
            ingress.external_hostname = 'example.com'
          end

          it { is_expected.to be_truthy }
        end
      end
    end
  end
spec/models/clusters/applications/prometheus_spec.rb  (+45 -0)

@@ -39,6 +39,19 @@ describe Clusters::Applications::Prometheus do
    end
  end

+  describe 'transition to updating' do
+    let(:project) { create(:project) }
+    let(:cluster) { create(:cluster, projects: [project]) }
+
+    subject { create(:clusters_applications_prometheus, :installed, cluster: cluster) }
+
+    it 'sets last_update_started_at to now' do
+      Timecop.freeze do
+        expect { subject.make_updating }.to change { subject.reload.last_update_started_at }.to be_within(1.second).of(Time.now)
+      end
+    end
+  end
+
  describe '#can_uninstall?' do
    let(:prometheus) { create(:clusters_applications_prometheus) }

@@ -331,6 +344,38 @@ describe Clusters::Applications::Prometheus do
    end
  end

+  describe '#updated_since?' do
+    let(:cluster) { create(:cluster) }
+    let(:prometheus_app) { build(:clusters_applications_prometheus, cluster: cluster) }
+    let(:timestamp) { Time.now - 5.minutes }
+
+    around do |example|
+      Timecop.freeze { example.run }
+    end
+
+    before do
+      prometheus_app.last_update_started_at = Time.now
+    end
+
+    context 'when app does not have status failed' do
+      it 'returns true when last update started after the timestamp' do
+        expect(prometheus_app.updated_since?(timestamp)).to be true
+      end
+
+      it 'returns false when last update started before the timestamp' do
+        expect(prometheus_app.updated_since?(Time.now + 5.minutes)).to be false
+      end
+    end
+
+    context 'when app has status failed' do
+      it 'returns false when last update started after the timestamp' do
+        prometheus_app.status = 6
+
+        expect(prometheus_app.updated_since?(timestamp)).to be false
+      end
+    end
+  end
+
  describe 'alert manager token' do
    subject { create(:clusters_applications_prometheus) }
spec/models/project_spec.rb  (+18 -0)

@@ -5938,6 +5938,24 @@ describe Project do
    end
  end

+  describe '#environments_for_scope' do
+    let_it_be(:project, reload: true) { create(:project) }
+
+    before do
+      create_list(:environment, 2, project: project)
+    end
+
+    it 'retrieves all project environments when using the * wildcard' do
+      expect(project.environments_for_scope("*")).to eq(project.environments)
+    end
+
+    it 'retrieves a specific project environment when using the name of that environment' do
+      environment = project.environments.first
+
+      expect(project.environments_for_scope(environment.name)).to eq([environment])
+    end
+  end
+
  def finish_job(export_job)
    export_job.start
    export_job.finish
spec/models/user_canonical_email_spec.rb  (new file, +20 -0)

# frozen_string_literal: true

require 'spec_helper'

describe UserCanonicalEmail do
  it { is_expected.to belong_to(:user) }

  describe 'validations' do
    describe 'canonical_email' do
      it { is_expected.to validate_presence_of(:canonical_email) }

      it 'validates email address', :aggregate_failures do
        expect(build(:user_canonical_email, canonical_email: 'nonsense')).not_to be_valid
        expect(build(:user_canonical_email, canonical_email: '@nonsense')).not_to be_valid
        expect(build(:user_canonical_email, canonical_email: '@nonsense@')).not_to be_valid
        expect(build(:user_canonical_email, canonical_email: 'nonsense@')).not_to be_valid
      end
    end
  end
end
spec/services/clusters/applications/check_upgrade_progress_service_spec.rb  (new file, +94 -0)

# frozen_string_literal: true

require 'spec_helper'

describe Clusters::Applications::CheckUpgradeProgressService do
  RESCHEDULE_PHASES = ::Gitlab::Kubernetes::Pod::PHASES -
    [::Gitlab::Kubernetes::Pod::SUCCEEDED, ::Gitlab::Kubernetes::Pod::FAILED].freeze

  let(:application) { create(:clusters_applications_prometheus, :updating) }
  let(:service) { described_class.new(application) }
  let(:phase) { ::Gitlab::Kubernetes::Pod::UNKNOWN }
  let(:errors) { nil }

  shared_examples 'a terminated upgrade' do
    it 'removes the POD' do
      expect(service).to receive(:remove_pod).once

      service.execute
    end
  end

  shared_examples 'a not yet terminated upgrade' do |a_phase|
    let(:phase) { a_phase }

    context "when phase is #{a_phase}" do
      context 'when not timed out' do
        it 'reschedule a new check' do
          expect(::ClusterWaitForAppUpdateWorker).to receive(:perform_in).once
          expect(service).not_to receive(:remove_pod)

          service.execute

          expect(application).to be_updating
          expect(application.status_reason).to be_nil
        end
      end

      context 'when timed out' do
        let(:application) { create(:clusters_applications_prometheus, :timed_out, :updating) }

        it_behaves_like 'a terminated upgrade'

        it 'make the application update errored' do
          expect(::ClusterWaitForAppUpdateWorker).not_to receive(:perform_in)

          service.execute

          expect(application).to be_update_errored
          expect(application.status_reason).to eq("Update timed out")
        end
      end
    end
  end

  before do
    allow(service).to receive(:phase).once.and_return(phase)
    allow(service).to receive(:errors).and_return(errors)
    allow(service).to receive(:remove_pod).and_return(nil)
  end

  describe '#execute' do
    context 'when upgrade pod succeeded' do
      let(:phase) { ::Gitlab::Kubernetes::Pod::SUCCEEDED }

      it_behaves_like 'a terminated upgrade'

      it 'make the application upgraded' do
        expect(::ClusterWaitForAppUpdateWorker).not_to receive(:perform_in)

        service.execute

        expect(application).to be_updated
        expect(application.status_reason).to be_nil
      end
    end

    context 'when upgrade pod failed' do
      let(:phase) { ::Gitlab::Kubernetes::Pod::FAILED }
      let(:errors) { 'test installation failed' }

      it_behaves_like 'a terminated upgrade'

      it 'make the application update errored' do
        service.execute

        expect(application).to be_update_errored
        expect(application.status_reason).to eq(errors)
      end
    end

    RESCHEDULE_PHASES.each { |phase| it_behaves_like 'a not yet terminated upgrade', phase }
  end
end
spec/services/clusters/applications/prometheus_config_service_spec.rb  (new file, +158 -0)

# frozen_string_literal: true

require 'spec_helper'

describe Clusters::Applications::PrometheusConfigService do
  include Gitlab::Routing.url_helpers

  let_it_be(:project) { create(:project) }
  let_it_be(:production) { create(:environment, project: project) }
  let_it_be(:cluster) { create(:cluster, :provided_by_user, projects: [project]) }

  let(:application) do
    create(:clusters_applications_prometheus, :installed, cluster: cluster)
  end

  subject { described_class.new(project, cluster, application).execute(input) }

  describe '#execute' do
    let(:input) do
      YAML.load_file(Rails.root.join('vendor/prometheus/values.yaml'))
    end

    context 'with alerts' do
      let!(:alert) do
        create(:prometheus_alert, project: project, environment: production)
      end

      it 'enables alertmanager' do
        expect(subject.dig('alertmanager', 'enabled')).to eq(true)
      end

      describe 'alertmanagerFiles' do
        let(:alertmanager) do
          subject.dig('alertmanagerFiles', 'alertmanager.yml')
        end

        it 'contains receivers and route' do
          expect(alertmanager.keys).to contain_exactly('receivers', 'route')
        end

        describe 'receivers' do
          let(:receiver) { alertmanager.dig('receivers', 0) }
          let(:webhook_config) { receiver.dig('webhook_configs', 0) }
          let(:notify_url) do
            notify_project_prometheus_alerts_url(project, format: :json)
          end

          it 'sets receiver' do
            expect(receiver['name']).to eq('gitlab')
          end

          it 'sets webhook_config' do
            expect(webhook_config).to eq(
              'url' => notify_url,
              'send_resolved' => true,
              'http_config' => {
                'bearer_token' => application.alert_manager_token
              }
            )
          end
        end

        describe 'route' do
          let(:route) { alertmanager.fetch('route') }

          it 'sets route' do
            expect(route).to eq(
              'receiver' => 'gitlab',
              'group_wait' => '30s',
              'group_interval' => '5m',
              'repeat_interval' => '4h'
            )
          end
        end
      end

      describe 'serverFiles' do
        let(:groups) { subject.dig('serverFiles', 'alerts', 'groups') }

        it 'sets the alerts' do
          rules = groups.dig(0, 'rules')

          expect(rules.size).to eq(1)
          expect(rules.first['alert']).to eq(alert.title)
        end

        context 'with parameterized queries' do
          let!(:alert) do
            create(:prometheus_alert,
                   project: project,
                   environment: production,
                   prometheus_metric: metric)
          end

          let(:metric) do
            create(:prometheus_metric, query: query, project: project)
          end

          let(:query) { '%{ci_environment_slug}' }

          it 'substitutes query variables' do
            expect(Gitlab::Prometheus::QueryVariables)
              .to receive(:call)
              .with(production)
              .and_call_original

            expr = groups.dig(0, 'rules', 0, 'expr')

            expect(expr).to include(production.name)
          end
        end

        context 'with multiple environments' do
          let(:staging) { create(:environment, project: project) }

          before do
            create(:prometheus_alert, project: project, environment: production)
            create(:prometheus_alert, project: project, environment: staging)
          end

          it 'sets alerts for multiple environment' do
            env_names = groups.map { |group| group['name'] }

            expect(env_names).to contain_exactly(
              "#{production.name}.rules",
              "#{staging.name}.rules"
            )
          end

          it 'substitutes query variables once per environment' do
            expect(Gitlab::Prometheus::QueryVariables)
              .to receive(:call)
              .with(production)

            expect(Gitlab::Prometheus::QueryVariables)
              .to receive(:call)
              .with(staging)

            subject
          end
        end
      end
    end

    context 'without alerts' do
      it 'disables alertmanager' do
        expect(subject.dig('alertmanager', 'enabled')).to eq(false)
      end

      it 'removes alertmanagerFiles' do
        expect(subject).not_to include('alertmanagerFiles')
      end

      it 'removes alerts' do
        expect(subject.dig('serverFiles', 'alerts')).to eq({})
      end
    end
  end
end
spec/services/clusters/applications/prometheus_update_service_spec.rb  (new file, +92 -0)

# frozen_string_literal: true

require 'spec_helper'

describe Clusters::Applications::PrometheusUpdateService do
  describe '#execute' do
    let(:project) { create(:project) }
    let(:environment) { create(:environment, project: project) }
    let(:cluster) { create(:cluster, :provided_by_user, :with_installed_helm, projects: [project]) }
    let(:application) { create(:clusters_applications_prometheus, :installed, cluster: cluster) }
    let(:empty_alerts_values_update_yaml) { "---\nalertmanager:\n  enabled: false\nserverFiles:\n  alerts: {}\n" }
    let!(:patch_command) { application.patch_command(empty_alerts_values_update_yaml) }
    let(:helm_client) { instance_double(::Gitlab::Kubernetes::Helm::API) }

    subject(:service) { described_class.new(application, project) }

    before do
      allow(service).to receive(:patch_command).with(empty_alerts_values_update_yaml).and_return(patch_command)
      allow(service).to receive(:helm_api).and_return(helm_client)
    end

    context 'when there are no errors' do
      before do
        expect(helm_client).to receive(:update).with(patch_command)

        allow(::ClusterWaitForAppUpdateWorker)
          .to receive(:perform_in)
          .and_return(nil)
      end

      it 'make the application updating' do
        expect(application.cluster).not_to be_nil

        service.execute

        expect(application).to be_updating
      end

      it 'updates current config' do
        prometheus_config_service = spy(:prometheus_config_service)

        expect(Clusters::Applications::PrometheusConfigService)
          .to receive(:new)
          .with(project, cluster, application)
          .and_return(prometheus_config_service)

        expect(prometheus_config_service)
          .to receive(:execute)
          .and_return(YAML.safe_load(empty_alerts_values_update_yaml))

        service.execute
      end

      it 'schedules async update status check' do
        expect(::ClusterWaitForAppUpdateWorker).to receive(:perform_in).once

        service.execute
      end
    end

    context 'when k8s cluster communication fails' do
      before do
        error = ::Kubeclient::HttpError.new(500, 'system failure', nil)
        allow(helm_client).to receive(:update).and_raise(error)
      end

      it 'make the application update errored' do
        service.execute

        expect(application).to be_update_errored
        expect(application.status_reason).to match(/kubernetes error:/i)
      end
    end

    context 'when application cannot be persisted' do
      let(:application) { build(:clusters_applications_prometheus, :installed) }

      before do
        allow(application).to receive(:make_updating!).once
          .and_raise(ActiveRecord::RecordInvalid.new(application))
      end

      it 'make the application update errored' do
        expect(helm_client).not_to receive(:update)

        service.execute

        expect(application).to be_update_errored
      end
    end
  end
end
spec/services/clusters/applications/schedule_update_service_spec.rb  (new file, +37 -0)

# frozen_string_literal: true

require 'spec_helper'

describe Clusters::Applications::ScheduleUpdateService do
  describe '#execute' do
    let(:project) { create(:project) }

    around do |example|
      Timecop.freeze { example.run }
    end

    context 'when application is able to be updated' do
      context 'when the application was recently scheduled' do
        it 'schedules worker with a backoff delay' do
          application = create(:clusters_applications_prometheus, :installed, last_update_started_at: Time.now + 5.minutes)
          service = described_class.new(application, project)

          expect(::ClusterUpdateAppWorker).to receive(:perform_in).with(described_class::BACKOFF_DELAY, application.name, application.id, project.id, Time.now).once

          service.execute
        end
      end

      context 'when the application has not been recently updated' do
        it 'schedules worker' do
          application = create(:clusters_applications_prometheus, :installed)
          service = described_class.new(application, project)

          expect(::ClusterUpdateAppWorker).to receive(:perform_async).with(application.name, application.id, project.id, Time.now).once

          service.execute
        end
      end
    end
  end
end
spec/services/users/build_service_spec.rb  (+8 -0)

@@ -16,6 +16,14 @@ describe Users::BuildService do
       expect(service.execute).to be_valid
     end

+    context 'calls the UpdateCanonicalEmailService' do
+      specify do
+        expect(Users::UpdateCanonicalEmailService).to receive(:new).and_call_original
+
+        service.execute
+      end
+    end
+
     context 'allowed params' do
       let(:params) do
         {
spec/services/users/create_service_spec.rb  (+2 -1)

@@ -8,10 +8,11 @@ describe Users::CreateService do
   context 'with an admin user' do
     let(:service) { described_class.new(admin_user, params) }
+    let(:email) { 'jd@example.com' }

     context 'when required parameters are provided' do
       let(:params) do
-        { name: 'John Doe', username: 'jduser', email: 'jd@example.com', password: 'mydummypass' }
+        { name: 'John Doe', username: 'jduser', email: email, password: 'mydummypass' }
       end

       it 'returns a persisted user' do
spec/services/users/update_canonical_email_service_spec.rb  (new file, +116 -0)

# frozen_string_literal: true

require 'spec_helper'

describe Users::UpdateCanonicalEmailService do
  let(:other_email) { "differentaddress@includeddomain.com" }

  before do
    stub_const("Users::UpdateCanonicalEmailService::INCLUDED_DOMAINS_PATTERN", [/includeddomain/])
  end

  describe '#initialize' do
    context 'unsuccessful' do
      it 'raises an error if there is no user' do
        expect { described_class.new(user: nil) }.to raise_error(ArgumentError, /Please provide a user/)
      end

      it 'raises an error if the object is not a User' do
        expect { described_class.new(user: 123) }.to raise_error(ArgumentError, /Please provide a user/)
      end
    end

    context 'when a user is provided' do
      it 'does not error' do
        user = build(:user)

        expect { described_class.new(user: user) }.not_to raise_error
      end
    end
  end

  describe "#canonicalize_email" do
    let(:user) { build(:user) }
    let(:subject) { described_class.new(user: user) }

    context 'when the email domain is included' do
      context 'strips out any . or anything after + in the agent for included domains' do
        using RSpec::Parameterized::TableSyntax

        let(:expected_result) { 'user@includeddomain.com' }

        where(:raw_email, :expected_result) do
          'user@includeddomain.com'      | 'user@includeddomain.com'
          'u.s.e.r@includeddomain.com'   | 'user@includeddomain.com'
          'user+123@includeddomain.com'  | 'user@includeddomain.com'
          'us.er+123@includeddomain.com' | 'user@includeddomain.com'
        end

        with_them do
          before do
            user.email = raw_email
          end

          specify do
            subject.execute

            expect(user.user_canonical_email).not_to be_nil
            expect(user.user_canonical_email.canonical_email).to eq expected_result
          end
        end
      end

      context 'when the user has an existing canonical email' do
        it 'updates the user canonical email record' do
          user.user_canonical_email = build(:user_canonical_email, canonical_email: other_email)
          user.email = "us.er+123@includeddomain.com"

          subject.execute

          expect(user.user_canonical_email.canonical_email).to eq "user@includeddomain.com"
        end
      end
    end

    context 'when the email domain is not included' do
      it 'returns nil' do
        user.email = "u.s.er+343@excludeddomain.com"

        subject.execute

        expect(user.user_canonical_email).to be_nil
      end

      it 'destroys any existing UserCanonicalEmail record' do
        user.email = "u.s.er+343@excludeddomain.com"
        user.user_canonical_email = build(:user_canonical_email, canonical_email: other_email)
        expect(user.user_canonical_email).to receive(:delete)

        subject.execute
      end
    end

    context 'when the user email is not processable' do
      [nil, 'nonsense'].each do |invalid_address|
        before do
          user.email = invalid_address
        end

        specify do
          subject.execute

          expect(user.user_canonical_email).to be_nil
        end

        it 'preserves any existing record' do
          user.email = nil
          user.user_canonical_email = build(:user_canonical_email, canonical_email: other_email)

          subject.execute

          expect(user.user_canonical_email.canonical_email).to eq other_email
        end
      end
    end
  end
end
spec/services/users/update_service_spec.rb  (+26 -0)

@@ -71,6 +71,32 @@ describe Users::UpdateService do
       expect(user.job_title).to eq('Backend Engineer')
     end

+    context 'updating canonical email' do
+      context 'if email was changed' do
+        subject do
+          update_user(user, email: 'user+extrastuff@example.com')
+        end
+
+        it 'calls canonicalize_email' do
+          expect_next_instance_of(Users::UpdateCanonicalEmailService) do |service|
+            expect(service).to receive(:execute)
+          end
+
+          subject
+        end
+      end
+
+      context 'if email was NOT changed' do
+        subject do
+          update_user(user, job_title: 'supreme leader of the universe')
+        end
+
+        it 'skips update canonicalize email service call' do
+          expect { subject }.not_to change { user.user_canonical_email }
+        end
+      end
+    end
+
     def update_user(user, opts)
       described_class.new(user, opts.merge(user: user)).execute
     end
spec/workers/cluster_update_app_worker_spec.rb  (new file, +98 -0)

# frozen_string_literal: true

require 'spec_helper'

describe ClusterUpdateAppWorker do
  include ExclusiveLeaseHelpers

  let_it_be(:project) { create(:project) }

  let(:prometheus_update_service) { spy }

  subject { described_class.new }

  around do |example|
    Timecop.freeze(Time.now) { example.run }
  end

  before do
    allow(::Clusters::Applications::PrometheusUpdateService).to receive(:new).and_return(prometheus_update_service)
  end

  describe '#perform' do
    context 'when the application last_update_started_at is higher than the time the job was scheduled in' do
      it 'does nothing' do
        application = create(:clusters_applications_prometheus, :updated, last_update_started_at: Time.now)

        expect(prometheus_update_service).not_to receive(:execute)

        expect(subject.perform(application.name, application.id, project.id, Time.now - 5.minutes)).to be_nil
      end
    end

    context 'when another worker is already running' do
      it 'returns nil' do
        application = create(:clusters_applications_prometheus, :updating)

        expect(subject.perform(application.name, application.id, project.id, Time.now)).to be_nil
      end
    end

    it 'executes PrometheusUpdateService' do
      application = create(:clusters_applications_prometheus, :installed)

      expect(prometheus_update_service).to receive(:execute)

      subject.perform(application.name, application.id, project.id, Time.now)
    end

    context 'with exclusive lease' do
      let(:application) { create(:clusters_applications_prometheus, :installed) }
      let(:lease_key) { "#{described_class.name.underscore}-#{application.id}" }

      before do
        allow(Gitlab::ExclusiveLease).to receive(:new)
        stub_exclusive_lease_taken(lease_key)
      end

      it 'does not allow same app to be updated concurrently by same project' do
        expect(Clusters::Applications::PrometheusUpdateService).not_to receive(:new)

        subject.perform(application.name, application.id, project.id, Time.now)
      end

      it 'does not allow same app to be updated concurrently by different project' do
        project1 = create(:project)

        expect(Clusters::Applications::PrometheusUpdateService).not_to receive(:new)

        subject.perform(application.name, application.id, project1.id, Time.now)
      end

      it 'allows different app to be updated concurrently by same project' do
        application2 = create(:clusters_applications_prometheus, :installed)
        lease_key2 = "#{described_class.name.underscore}-#{application2.id}"

        stub_exclusive_lease(lease_key2)

        expect(Clusters::Applications::PrometheusUpdateService).to receive(:new)
          .with(application2, project)

        subject.perform(application2.name, application2.id, project.id, Time.now)
      end

      it 'allows different app to be updated by different project' do
        application2 = create(:clusters_applications_prometheus, :installed)
        lease_key2 = "#{described_class.name.underscore}-#{application2.id}"
        project2 = create(:project)

        stub_exclusive_lease(lease_key2)

        expect(Clusters::Applications::PrometheusUpdateService).to receive(:new)
          .with(application2, project2)

        subject.perform(application2.name, application2.id, project2.id, Time.now)
      end
    end
  end
end
spec/workers/cluster_wait_for_app_update_worker_spec.rb  (new file, +27 -0)

# frozen_string_literal: true

require 'spec_helper'

describe ClusterWaitForAppUpdateWorker do
  let(:check_upgrade_progress_service) { spy }

  before do
    allow(::Clusters::Applications::CheckUpgradeProgressService).to receive(:new).and_return(check_upgrade_progress_service)
  end

  it 'runs CheckUpgradeProgressService when application is found' do
    application = create(:clusters_applications_prometheus)

    expect(check_upgrade_progress_service).to receive(:execute)

    subject.perform(application.name, application.id)
  end

  it 'does not run CheckUpgradeProgressService when application is not found' do
    expect(check_upgrade_progress_service).not_to receive(:execute)

    expect do
      subject.perform("prometheus", -1)
    end.to raise_error(ActiveRecord::RecordNotFound)
  end
end