nexedi / gitlab-ce · commit 2040d996
authored Mar 21, 2018 by Douglas Barbosa Alexandre
parent ec67a370

Avoid rescheduling the same project again in a backfill condition
Showing 3 changed files with 21 additions and 8 deletions (+21 -8):

  ee/app/workers/geo/repository_shard_sync_worker.rb         +7  -1
  ee/app/workers/geo/scheduler/base_worker.rb                 +2  -7
  ee/spec/workers/geo/repository_shard_sync_worker_spec.rb   +12  -0
ee/app/workers/geo/repository_shard_sync_worker.rb

@@ -39,7 +39,11 @@ module Geo
     def schedule_job(project_id)
       job_id = Geo::ProjectSyncWorker.perform_async(project_id, Time.now)
 
-      { id: project_id, job_id: job_id } if job_id
+      { project_id: project_id, job_id: job_id } if job_id
+    end
+
+    def scheduled_project_ids
+      scheduled_jobs.map { |data| data[:project_id] }
     end
 
     def finder
@@ -59,12 +63,14 @@ module Geo
     def find_project_ids_not_synced(batch_size:)
       shard_restriction(finder.find_unsynced_projects(batch_size: batch_size))
+        .where.not(id: scheduled_project_ids)
         .reorder(last_repository_updated_at: :desc)
         .pluck(:id)
     end
 
     def find_project_ids_updated_recently(batch_size:)
       shard_restriction(finder.find_projects_updated_recently(batch_size: batch_size))
+        .where.not(id: scheduled_project_ids)
         .order('project_registry.last_repository_synced_at ASC NULLS FIRST, projects.last_repository_updated_at ASC')
         .pluck(:id)
     end
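The worker-side change above is the heart of the fix: keep the hashes returned by schedule_job, derive the ids of projects whose sync jobs are still in flight, and exclude them from the next batch so a backfill loop cannot pick the same project twice. A minimal standalone Ruby sketch of that pattern (the TinyScheduler class and the in-memory filtering are hypothetical illustrations; the real worker does the exclusion in SQL via .where.not(id: scheduled_project_ids)):

# Hypothetical, simplified illustration -- not GitLab's actual worker.
class TinyScheduler
  def initialize(scheduled_jobs)
    # e.g. [{ project_id: 1, job_id: "abc123" }, ...], as returned by schedule_job
    @scheduled_jobs = scheduled_jobs
  end

  # Mirrors the new scheduled_project_ids helper: ids of in-flight projects.
  def scheduled_project_ids
    @scheduled_jobs.map { |data| data[:project_id] }
  end

  # The real worker excludes these ids inside the finder query; plain
  # Array subtraction stands in for that here.
  def next_batch(candidate_ids)
    candidate_ids - scheduled_project_ids
  end
end

scheduler = TinyScheduler.new([{ project_id: 1, job_id: "abc123" }])
p scheduler.next_batch([1, 2, 3]) # => [2, 3] -- project 1 is not rescheduled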
ee/app/workers/geo/scheduler/base_worker.rb

@@ -145,7 +145,7 @@ module Geo
        status = Gitlab::SidekiqStatus.job_status(scheduled_job_ids)
 
        # SidekiqStatus returns an array of booleans: true if the job is still running, false otherwise.
-       # For each entry, first use `zip` to make { job_id: 123, id: 10 } -> [ { job_id: 123, id: 10 }, bool ]
+       # For each entry, first use `zip` to make { job_id: 123 } -> [ { job_id: 123 }, bool ]
        # Next, filter out the jobs that have completed.
        @scheduled_jobs = @scheduled_jobs.zip(status).map { |(job, running)| job if running }.compact
      end
@@ -160,12 +160,7 @@ module Geo
        num_to_schedule = 0 if num_to_schedule < 0
 
        to_schedule = pending_resources.shift(num_to_schedule)
-       scheduled = to_schedule.map do |args|
-         job = schedule_job(*args)
-
-         job if job&.fetch(:job_id, nil).present?
-       end.compact
-
+       scheduled = to_schedule.map { |args| schedule_job(*args) }.compact
        scheduled_jobs.concat(scheduled)
 
        log_info("Loop #{loops}", enqueued: scheduled.length, pending: pending_resources.length, scheduled: scheduled_jobs.length, capacity: capacity)
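Since schedule_job now returns nil for anything that failed to enqueue, the base worker can drop the job&.fetch(:job_id, nil).present? guard and rely on a single map/compact, and the generic comment no longer shows an id: 10 key that subclasses may or may not include. The zip/compact idiom it describes is plain Ruby and can be tried standalone (illustrative stand-in values below; in the real code the booleans come from Gitlab::SidekiqStatus.job_status):

# Stand-in data: job hashes plus a parallel array of "still running?" flags.
scheduled_jobs = [{ job_id: "a" }, { job_id: "b" }, { job_id: "c" }]
status         = [true, false, true]

# zip pairs each job with its boolean: [[{ job_id: "a" }, true], ...].
# The block yields the job when it is still running (nil otherwise),
# and compact drops the nils left behind by completed jobs.
scheduled_jobs = scheduled_jobs.zip(status).map { |(job, running)| job if running }.compact
p scheduled_jobs # => [{ job_id: "a" }, { job_id: "c" }]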
ee/spec/workers/geo/repository_shard_sync_worker_spec.rb

@@ -64,6 +64,18 @@ describe Geo::RepositoryShardSyncWorker, :geo, :delete, :clean_gitlab_redis_cach
         subject.perform(shard_name)
       end
 
+      it 'does not schedule a job twice for the same project' do
+        scheduled_jobs = [
+          { job_id: 1, project_id: unsynced_project.id },
+          { job_id: 2, project_id: unsynced_project_in_restricted_group.id }
+        ]
+
+        is_expected.to receive(:scheduled_jobs).and_return(scheduled_jobs).at_least(:once)
+        is_expected.not_to receive(:schedule_job)
+
+        Sidekiq::Testing.inline! { subject.perform(shard_name) }
+      end
+
       it 'does not perform Geo::ProjectSyncWorker when no geo database is configured' do
         allow(Gitlab::Geo).to receive(:geo_database_configured?) { false }
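The new spec stubs scheduled_jobs so that both candidate projects already look in flight, then asserts schedule_job is never called. A self-contained sketch of that stub-and-assert pattern, runnable with nothing but the rspec gem (FakeWorker is a hypothetical stand-in, not the real worker):

require 'rspec/autorun'

# Hypothetical worker, just enough to demonstrate the stubbing pattern.
class FakeWorker
  def scheduled_jobs
    [] # the real worker tracks these across scheduling loops
  end

  def scheduled_project_ids
    scheduled_jobs.map { |data| data[:project_id] }
  end

  def schedule_job(project_id); end

  def perform(candidate_ids)
    # Skip anything already in flight, as the patched worker does in SQL.
    (candidate_ids - scheduled_project_ids).each { |id| schedule_job(id) }
  end
end

RSpec.describe FakeWorker do
  it 'does not schedule a job twice for the same project' do
    expect(subject).to receive(:scheduled_jobs)
      .and_return([{ job_id: 1, project_id: 42 }]).at_least(:once)
    expect(subject).not_to receive(:schedule_job)

    subject.perform([42])
  end
end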