Commit ea6ba5dd authored by Xavier Thompson

Adapt to compiler changes

parent 90392daf
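The compiler change being adapted to appears to be a rename of the cypclass reference qualifier from 'locked' to 'lock'; the 'wlocked' context-manager keyword is untouched in this diff. A minimal before/after sketch of the rename, using a declaration taken from the first hunk below (assumption: the spelling is the only change, semantics are identical):

    # before: old Cython+ spelling of the qualifier
    cdef locked Scheduler scheduler

    # after: new spelling accepted by the updated compiler
    cdef lock Scheduler scheduler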
@@ -15,7 +15,7 @@ from stdlib.dirent cimport DIR, struct_dirent, opendir, readdir, closedir
 from posix.unistd cimport readlink

-cdef locked Scheduler scheduler
+cdef lock Scheduler scheduler

 cdef cypclass Node activable:
@@ -31,7 +31,7 @@ cdef cypclass Node activable:
         self.name = name
         self.st = st

-    void build_node(self, locked cyplist[dev_t] dev_whitelist, locked cyplist[string] ignore_paths):
+    void build_node(self, lock cyplist[dev_t] dev_whitelist, lock cyplist[string] ignore_paths):
         # abstract
         pass
@@ -73,7 +73,7 @@ cdef cypclass DirNode(Node):
         self.children = new cyplist[active Node]()
         self.children.__init__()

-    void build_node(self, locked cyplist[dev_t] dev_whitelist, locked cyplist[string] ignore_paths):
+    void build_node(self, lock cyplist[dev_t] dev_whitelist, lock cyplist[string] ignore_paths):
         cdef DIR *d
         cdef struct_dirent *entry
         cdef string entry_name
@@ -138,7 +138,7 @@ cdef cypclass FileNode(Node):
         Node.__init__(self, path, name, st)
         self.error = False

-    void build_node(self, locked cyplist[dev_t] dev_whitelist, locked cyplist[string] ignore_paths):
+    void build_node(self, lock cyplist[dev_t] dev_whitelist, lock cyplist[string] ignore_paths):
         cdef unsigned char buffer[BUFSIZE]
         cdef bint eof = False
         cdef bint md5_ok
@@ -219,7 +219,7 @@ cdef cypclass SymlinkNode(Node):
     string target
     int error

-    void build_node(self, locked cyplist[dev_t] dev_whitelist, locked cyplist[string] ignore_paths):
+    void build_node(self, lock cyplist[dev_t] dev_whitelist, lock cyplist[string] ignore_paths):
         size = self.st.st_data.st_size + 1
         self.target.resize(size)
         real_size = readlink(self.path.c_str(), <char*> self.target.data(), size)
...
@@ -19,7 +19,7 @@ cdef cypclass Worker
 # The 'inline' qualifier on this function is a hack to convince Cython to allow a definition in a .pxd file.
 # The C compiler will dismiss it because we pass the function pointer to create a thread which prevents inlining.
 cdef inline void * worker_function(void * arg) nogil:
-    worker = <locked Worker> arg
+    worker = <lock Worker> arg
     sch = <Scheduler> <void*> worker.scheduler
     cdef int num_remaining_queues
     # Wait until all the workers are ready.
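Besides the 'inline' hack explained by the comments above, note the '<Scheduler> <void*>' double cast: going through void* strips the lock qualifier from worker.scheduler, so subsequent field accesses take no lock. A hedged micro-sketch of the pattern (thread_entry is an illustrative name, not new API; safety rests on the caller only touching fields that are atomic or otherwise synchronized):

    cdef inline void * thread_entry(void * arg) nogil:
        w = <lock Worker> arg                  # recover the typed reference handed to pthread_create
        s = <Scheduler> <void*> w.scheduler    # unqualified view: no lock taken on field access
        # ... use s only for fields that are atomic or otherwise synchronized ...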
@@ -51,19 +51,19 @@ cdef inline void * worker_function(void * arg) nogil:

 cdef cypclass Worker:
-    deque[locked SequentialMailBox] queues
-    locked Scheduler scheduler
+    deque[lock SequentialMailBox] queues
+    lock Scheduler scheduler
     pthread_t thread

-    locked Worker __new__(alloc, locked Scheduler scheduler):
+    lock Worker __new__(alloc, lock Scheduler scheduler):
         instance = consume alloc()
         instance.scheduler = scheduler
-        locked_instance = <locked Worker> consume instance
+        locked_instance = <lock Worker> consume instance
         if not pthread_create(&locked_instance.thread, NULL, worker_function, <void *> locked_instance):
             return locked_instance
         printf("pthread_create() failed\n")

-    locked SequentialMailBox get_queue(locked self):
+    lock SequentialMailBox get_queue(lock self):
         # Get the next queue in the worker's list or steal one.
         with wlocked self:
             if not self.queues.empty():
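The __new__ above shows the usual cypclass construction pattern: 'consume alloc()' yields an exclusively owned instance that can be mutated freely, and the final '<lock Worker> consume' trades that exclusivity for a shareable, lock-protected reference. A minimal sketch of the same pattern, assuming standard cypclass semantics (the Point class is hypothetical):

    cdef cypclass Point:
        int x

        lock Point __new__(alloc, int x):
            p = consume alloc()            # exclusive: no other reference can exist yet
            p.x = x                        # so plain field writes are safe here
            return <lock Point> consume p  # publish as a shared lock reference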
@@ -72,7 +72,7 @@ cdef cypclass Worker:
                 return queue
         return self.steal_queue()

-    locked SequentialMailBox steal_queue(locked self):
+    lock SequentialMailBox steal_queue(lock self):
         # Steal a queue from another worker:
         # - inspect each worker in order starting at a random offset
         # - skip this worker and any worker with an empty queue list
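The body of steal_queue is elided from this hunk; below is a hedged sketch of one plausible shape, following the steps the comments list (random starting offset, skip self and workers with empty queue lists). It assumes rand() is cimported from libc.stdlib and reuses the qualifier-stripping cast seen in worker_function; the actual elided body may differ:

    lock SequentialMailBox steal_queue(lock self):
        sch = <Scheduler> <void*> self.scheduler      # same cast as in worker_function
        num_workers = <int> sch.workers.size()
        offset = rand() % num_workers                  # random starting offset
        for i in range(num_workers):
            victim = sch.workers[(offset + i) % num_workers]
            if <void*> victim == <void*> self:
                continue                               # skip this worker
            with wlocked victim:
                if not victim.queues.empty():
                    queue = victim.queues.front()
                    victim.queues.pop_front()
                    return queue
        # falls through (NULL result) when nothing could be stolen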
@@ -100,15 +100,15 @@ cdef cypclass Worker:

 cdef cypclass Scheduler:
-    vector[locked Worker] workers
+    vector[lock Worker] workers
     pthread_barrier_t barrier
     sem_t num_free_queues
     atomic[int] num_pending_queues
     sem_t done
     volatile bint is_done

-    locked Scheduler __new__(alloc, int num_workers=0):
-        self = <locked Scheduler> consume alloc()
+    lock Scheduler __new__(alloc, int num_workers=0):
+        self = <lock Scheduler> consume alloc()
         if num_workers == 0: num_workers = sysconf(_SC_NPROCESSORS_ONLN)
         sem_init(&self.num_free_queues, 0, 0)
         sem_init(&self.done, 0, 0)
@@ -136,7 +136,7 @@ cdef cypclass Scheduler:
         sem_destroy(&self.num_free_queues)
         sem_destroy(&self.done)

-    void post_queue(self, locked SequentialMailBox queue):
+    void post_queue(self, lock SequentialMailBox queue):
         # Add a queue to the first worker.
         main_worker = self.workers[0]
         with wlocked main_worker:
@@ -147,7 +147,7 @@ cdef cypclass Scheduler:
         # Signal that a queue is available.
         sem_post(&self.num_free_queues)

-    void finish(locked self):
+    void finish(lock self):
         # Wait until there is no more work.
         done = &self.done
         sem_wait(done)
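post_queue and finish pair the two semaphores declared on Scheduler: each posted queue bumps num_free_queues so an idle worker can wake and claim it, while 'done' is presumably posted once num_pending_queues drops to zero (in code elided here) to release finish(). A hedged sketch of the waiting side, the assumed shape of the worker loop not shown in this diff:

    # inside worker_function's loop (sketch; names mirror the code above):
    sem_wait(&sch.num_free_queues)   # block until some queue is available
    if sch.is_done:                  # scheduler shutdown: exit the thread
        return NULL
    queue = worker.get_queue()       # take a local queue or steal one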
@@ -163,17 +163,17 @@ cdef cypclass Scheduler:

 cdef cypclass SequentialMailBox(ActhonQueueInterface):
     deque[ActhonMessageInterface] messages
-    locked Scheduler scheduler
+    lock Scheduler scheduler
     bint has_worker

-    __init__(self, locked Scheduler scheduler):
+    __init__(self, lock Scheduler scheduler):
         self.scheduler = scheduler
         self.has_worker = False

     bint is_empty(const self):
         return self.messages.empty()

-    void push(locked& self, ActhonMessageInterface message):
+    void push(lock self, ActhonMessageInterface message):
         # Add a task to the queue.
         self.messages.push_back(message)
         if message._sync_method is not NULL:
...