scan-filesystem (Xavier Thompson)

Commit 9f34d772 (unverified)
Authored Sep 17, 2021 by Julien Jerphanion
Remove contention on runtime

See merge request !2

Parent: ea6ba5dd
Showing 1 changed file with 25 additions and 21 deletions.

cython/runtime/runtime.pxd (+25 -21)
@@ -21,7 +21,6 @@ cdef cypclass Worker
 cdef inline void * worker_function(void * arg) nogil:
     worker = <lock Worker> arg
     sch = <Scheduler> <void*> worker.scheduler
-    cdef int num_remaining_queues
     # Wait until all the workers are ready.
     pthread_barrier_wait(&sch.barrier)
     while 1:
@@ -66,33 +65,32 @@ cdef cypclass Worker:
     lock SequentialMailBox get_queue(lock self):
         # Get the next queue in the worker's list or steal one.
         with wlocked self:
             if not self.queues.empty():
                 queue = self.queues.front()
                 self.queues.pop_front()
                 return queue
         return self.steal_queue()

     lock SequentialMailBox steal_queue(lock self):
         # Steal a queue from another worker:
-        # - inspect each worker in order starting at a random offset
-        # - skip this worker and any worker with an empty queue list
+        # - skip any worker with an empty queue list
         # - return the last queue of the first worker with a non-empty list
+        # - continue looping until a queue is found
         cdef int i, index, num_workers, random_offset
         sch = <Scheduler> <void*> self.scheduler
         num_workers = <int> sch.workers.size()
-        random_offset = rand() % num_workers
-        for i in range(num_workers):
-            index = (i + random_offset) % num_workers
+        index = rand() % num_workers
+        while True:
             victim = sch.workers[index]
-            if victim is self:
-                continue
             with wlocked victim:
                 if not victim.queues.empty():
                     stolen_queue = victim.queues.back()
                     victim.queues.pop_back()
                     stolen_queue.has_worker = True
                     return stolen_queue
-        return NULL
+            index += 1
+            if index >= num_workers:
+                index = 0

     int join(self):
         # Join the worker thread.
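The rewritten steal_queue drops the bounded scan (and its NULL fallback) in favor of cycling through the workers, wrapping around, until a queue turns up. A minimal Python sketch of that probing loop, with a stand-in Worker holding a plain lock and a deque instead of the project's cypclass types:

    import random
    import threading
    from collections import deque

    class Worker:
        """Stand-in for the cypclass Worker: a lock plus a list of queues."""
        def __init__(self):
            self.lock = threading.Lock()   # models `with wlocked victim`
            self.queues = deque()          # each entry models a SequentialMailBox

    def steal_queue(workers):
        # Start at a random worker, then cycle through all of them,
        # wrapping around, until some worker has a queue to give up.
        index = random.randrange(len(workers))
        while True:
            victim = workers[index]
            with victim.lock:
                if victim.queues:
                    return victim.queues.pop()  # steal from the back, like pop_back()
            index += 1
            if index >= len(workers):
                index = 0

Like the cypclass version, this loop spins if every queue list stays empty, so it only makes sense if the caller already knows work exists; presumably the surrounding worker loop waits on the num_free_queues semaphore that post_queue signals before it goes looking.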
@@ -106,10 +104,12 @@ cdef cypclass Scheduler:
     atomic[int] num_pending_queues
     sem_t done
     volatile bint is_done
+    int num_workers

     lock Scheduler __new__(alloc, int num_workers=0):
         self = <lock Scheduler> consume alloc()
         if num_workers == 0: num_workers = sysconf(_SC_NPROCESSORS_ONLN)
+        self.num_workers = num_workers
         sem_init(&self.num_free_queues, 0, 0)
         sem_init(&self.done, 0, 0)
         self.num_pending_queues.store(0)
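The constructor's default of one worker per online CPU (sysconf(_SC_NPROCESSORS_ONLN)) is now also cached on the scheduler as num_workers. The same defaulting rule, sketched in Python with os.cpu_count() standing in for sysconf:

    import os

    def default_num_workers(requested: int = 0) -> int:
        # 0 means "pick for me": one worker per online CPU,
        # mirroring sysconf(_SC_NPROCESSORS_ONLN) in the constructor.
        return requested if requested > 0 else (os.cpu_count() or 1)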
@@ -136,16 +136,20 @@ cdef cypclass Scheduler:
         sem_destroy(&self.num_free_queues)
         sem_destroy(&self.done)

-    void post_queue(self, lock SequentialMailBox queue):
-        # Add a queue to the first worker.
-        main_worker = self.workers[0]
-        with wlocked main_worker:
-            queue.has_worker = True
-            main_worker.queues.push_back(queue)
+    void post_queue(lock self, lock SequentialMailBox queue):
+        cdef int num_workers, random_offset
+        sch = <Scheduler> <void*> self
+        # Add a queue to a random worker.
+        num_workers = <int> sch.workers.size()
+        random_offset = rand() % num_workers
+        receiver = sch.workers[random_offset]
+        with wlocked receiver:
+            queue.has_worker = True
+            receiver.queues.push_back(queue)
         # Increment the number of non-completed queues.
-        self.num_pending_queues.fetch_add(1)
+        sch.num_pending_queues.fetch_add(1)
         # Signal that a queue is available.
-        sem_post(&self.num_free_queues)
+        sem_post(&sch.num_free_queues)

     void finish(lock self):
         # Wait until there is no more work.
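This hunk is the change the commit title points at: every new queue used to be pushed to workers[0], so all posting threads serialized on that one worker's lock and steal_queue then had to redistribute everything from there. Posting to a random worker spreads both the lock traffic and the initial placement. A small Python sketch of the idea, again using stand-in types rather than the project's cypclass API:

    import random
    import threading
    from collections import deque
    from dataclasses import dataclass, field

    class Worker:
        def __init__(self):
            self.lock = threading.Lock()
            self.queues = deque()

    @dataclass
    class SchedulerState:
        num_pending_queues: int = 0
        count_lock: threading.Lock = field(default_factory=threading.Lock)
        num_free_queues: threading.Semaphore = field(
            default_factory=lambda: threading.Semaphore(0))

    def post_queue(workers, queue, state: SchedulerState):
        # Hand the new queue to a random worker instead of always workers[0],
        # so concurrent posters rarely collide on the same worker's lock.
        receiver = random.choice(workers)
        with receiver.lock:
            receiver.queues.append(queue)
        with state.count_lock:            # stands in for the atomic fetch_add(1)
            state.num_pending_queues += 1
        state.num_free_queues.release()   # stands in for sem_post(&sch.num_free_queues)

Here random.choice plays the role of rand() % num_workers; either way the point is only to decorrelate concurrent posters, not to balance load exactly, and any imbalance is later corrected by work stealing.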