Commit b56e88e2 authored by Manfred Spraul, committed by Greg Kroah-Hartman

ipc/sem.c: rename try_atomic_semop() to perform_atomic_semop(), docu update

commit 758a6ba3 upstream.

Cleanup: Some minor points that I noticed while writing the previous
patches

1) The name try_atomic_semop() is misleading: The function performs the
   operation (if it is possible).

2) Some documentation updates.

No real code change, a rename and documentation changes.
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent bf6830ad
diff --git a/ipc/sem.c b/ipc/sem.c
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -154,12 +154,15 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 #define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 
 /*
- * linked list protection:
+ * Locking:
  *	sem_undo.id_next,
+ *	sem_array.complex_count,
  *	sem_array.pending{_alter,_cont},
- *	sem_array.sem_undo: sem_lock() for read/write
+ *	sem_array.sem_undo: global sem_lock() for read/write
  *	sem_undo.proc_next: only "current" is allowed to read/write that field.
  *
+ * sem_array.sem_base[i].pending_{const,alter}:
+ *	global or semaphore sem_lock() for read/write
  */
 
 #define sc_semmsl	sem_ctls[0]
@@ -536,12 +539,19 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }
 
-/*
- * Determine whether a sequence of semaphore operations would succeed
- * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
+/** perform_atomic_semop - Perform (if possible) a semaphore operation
+ * @sma: semaphore array
+ * @sops: array with operations that should be checked
+ * @nsems: number of sops
+ * @un: undo array
+ * @pid: pid that did the change
+ *
+ * Returns 0 if the operation was possible.
+ * Returns 1 if the operation is impossible, the caller must sleep.
+ * Negative values are error codes.
  */
 
-static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
+static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 			     int nsops, struct sem_undo *un, int pid)
 {
 	int result, sem_op;
@@ -724,8 +734,8 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
 		q = container_of(walk, struct sem_queue, list);
 		walk = walk->next;
 
-		error = try_atomic_semop(sma, q->sops, q->nsops,
+		error = perform_atomic_semop(sma, q->sops, q->nsops,
 					 q->undo, q->pid);
 
 		if (error <= 0) {
 			/* operation completed, remove from queue & wakeup */
@@ -838,7 +848,7 @@ static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
 		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
 			break;
 
-		error = try_atomic_semop(sma, q->sops, q->nsops,
+		error = perform_atomic_semop(sma, q->sops, q->nsops,
 					 q->undo, q->pid);
 
 		/* Does q->sleeper still need to sleep? */
@@ -1686,7 +1696,6 @@ static int get_queue_result(struct sem_queue *q)
 	return error;
 }
 
-
 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 		unsigned, nsops, const struct timespec __user *, timeout)
 {
@@ -1784,7 +1793,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	if (un && un->semid == -1)
 		goto out_unlock_free;
 
-	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
+	error = perform_atomic_semop(sma, sops, nsops, un,
+				task_tgid_vnr(current));
 	if (error <= 0) {
 		if (alter && error == 0)
 			do_smart_update(sma, sops, nsops, 1, &tasks);
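
For readers unfamiliar with the convention spelled out in the new kernel-doc, here is a minimal user-space sketch of how callers branch on perform_atomic_semop()'s return value (0 = performed, 1 = caller must sleep, negative = error). It is illustrative only; fake_semop() is a hypothetical stand-in, not kernel code.

/*
 * Illustrative sketch -- not part of the commit above. It mimics the
 * return convention documented for perform_atomic_semop().
 */
#include <errno.h>
#include <stdio.h>

static int fake_semop(int would_block, int overflow)
{
	if (overflow)
		return -ERANGE;		/* value would exceed the maximum */
	if (would_block)
		return 1;		/* not possible now, caller must sleep */
	return 0;			/* all operations applied */
}

int main(void)
{
	int error = fake_semop(0, 0);

	if (error <= 0) {
		/* Same branch the callers in the diff take: the waiter is
		 * done, either successfully (0) or with an error (< 0). */
		printf("completed: %d\n", error);
	} else {
		/* error == 1: queue the operation and go to sleep. */
		printf("would block, must sleep\n");
	}
	return 0;
}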