Commit 2f2ed41d authored by Manfred Spraul, committed by Linus Torvalds

ipc/sem.c: remove code duplication

count_semzcnt and count_semncnt are more or less identical.  The patch
creates a single function that counts either the number of tasks waiting
for zero or the number of tasks waiting due to a decrease operation.

Compared to the initial version, the BUG_ONs were removed.
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1994862d
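
For context, these counts are what semctl() reports for the GETNCNT and GETZCNT commands, which the last hunk below routes through count_semcnt(sma, semnum, 0) and count_semcnt(sma, semnum, 1). Below is a minimal userspace sketch of how the two counts can be observed; the key, permissions and the single blocking child are illustrative only, and error handling is omitted:

/* Illustrative only: observe semncnt/semzcnt from userspace. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <unistd.h>

int main(void)
{
        int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
        struct sembuf dec = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };

        if (fork() == 0) {
                /* child blocks: semval is 0, so a decrement must wait */
                semop(semid, &dec, 1);
                _exit(0);
        }
        sleep(1);       /* give the child time to queue itself */

        /* GETNCNT -> count_semcnt(sma, 0, 0): tasks waiting for the value to increase */
        printf("semncnt = %d\n", semctl(semid, 0, GETNCNT));   /* expected: 1 */
        /* GETZCNT -> count_semcnt(sma, 0, 1): tasks waiting for the value to become zero */
        printf("semzcnt = %d\n", semctl(semid, 0, GETZCNT));   /* expected: 0 */

        semctl(semid, 0, IPC_RMID);     /* removing the set wakes the child with EIDRM */
        return 0;
}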
@@ -47,8 +47,7 @@
  * Thus: Perfect SMP scaling between independent semaphore arrays.
  *       If multiple semaphores in one array are used, then cache line
  *       trashing on the semaphore array spinlock will limit the scaling.
- * - semncnt and semzcnt are calculated on demand in count_semncnt() and
- *   count_semzcnt()
+ * - semncnt and semzcnt are calculated on demand in count_semcnt()
  * - the task that performs a successful semop() scans the list of all
  *   sleeping tasks and completes any pending operations that can be fulfilled.
  *   Semaphores are actively given to waiting tasks (necessary for FIFO).
@@ -989,6 +988,31 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
                 set_semotime(sma, sops);
 }
 
+/*
+ * check_qop: Test how often a queued operation sleeps on the semaphore semnum
+ */
+static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
+                        bool count_zero)
+{
+        struct sembuf *sops = q->sops;
+        int nsops = q->nsops;
+        int i, semcnt;
+
+        semcnt = 0;
+
+        for (i = 0; i < nsops; i++) {
+                if (sops[i].sem_num != semnum)
+                        continue;
+                if (sops[i].sem_flg & IPC_NOWAIT)
+                        continue;
+                if (count_zero && sops[i].sem_op == 0)
+                        semcnt++;
+                if (!count_zero && sops[i].sem_op < 0)
+                        semcnt++;
+        }
+        return semcnt;
+}
+
 /* The following counts are associated to each semaphore:
  *   semncnt        number of tasks waiting on semval being nonzero
  *   semzcnt        number of tasks waiting on semval being zero
@@ -998,66 +1022,37 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
  * The counts we return here are a rough approximation, but still
  * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
  */
-static int count_semncnt(struct sem_array *sma, ushort semnum)
-{
-        int semncnt;
-        struct sem_queue *q;
-
-        semncnt = 0;
-        list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
-                struct sembuf *sops = q->sops;
-                BUG_ON(sops->sem_num != semnum);
-                if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
-                        semncnt++;
-        }
-
-        list_for_each_entry(q, &sma->pending_alter, list) {
-                struct sembuf *sops = q->sops;
-                int nsops = q->nsops;
-                int i;
-                for (i = 0; i < nsops; i++)
-                        if (sops[i].sem_num == semnum
-                            && (sops[i].sem_op < 0)
-                            && !(sops[i].sem_flg & IPC_NOWAIT))
-                                semncnt++;
-        }
-        return semncnt;
-}
-
-static int count_semzcnt(struct sem_array *sma, ushort semnum)
-{
-        int semzcnt;
-        struct sem_queue *q;
-
-        semzcnt = 0;
-        list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
-                struct sembuf *sops = q->sops;
-                BUG_ON(sops->sem_num != semnum);
-                if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
-                        semzcnt++;
-        }
-
-        list_for_each_entry(q, &sma->pending_const, list) {
-                struct sembuf *sops = q->sops;
-                int nsops = q->nsops;
-                int i;
-                for (i = 0; i < nsops; i++)
-                        if (sops[i].sem_num == semnum
-                            && (sops[i].sem_op == 0)
-                            && !(sops[i].sem_flg & IPC_NOWAIT))
-                                semzcnt++;
-        }
-        list_for_each_entry(q, &sma->pending_alter, list) {
-                struct sembuf *sops = q->sops;
-                int nsops = q->nsops;
-                int i;
-                for (i = 0; i < nsops; i++)
-                        if (sops[i].sem_num == semnum
-                            && (sops[i].sem_op == 0)
-                            && !(sops[i].sem_flg & IPC_NOWAIT))
-                                semzcnt++;
-        }
-        return semzcnt;
+static int count_semcnt(struct sem_array *sma, ushort semnum,
+                        bool count_zero)
+{
+        struct list_head *l;
+        struct sem_queue *q;
+        int semcnt;
+
+        semcnt = 0;
+        /* First: check the simple operations. They are easy to evaluate */
+        if (count_zero)
+                l = &sma->sem_base[semnum].pending_const;
+        else
+                l = &sma->sem_base[semnum].pending_alter;
+
+        list_for_each_entry(q, l, list) {
+                /* all task on a per-semaphore list sleep on exactly
+                 * that semaphore
+                 */
+                semcnt++;
+        }
+
+        /* Then: check the complex operations. */
+        list_for_each_entry(q, &sma->pending_alter, list) {
+                semcnt += check_qop(sma, semnum, q, count_zero);
+        }
+        if (count_zero) {
+                list_for_each_entry(q, &sma->pending_const, list) {
+                        semcnt += check_qop(sma, semnum, q, count_zero);
+                }
+        }
+        return semcnt;
 }
 
 /* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
@@ -1459,10 +1454,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                 err = curr->sempid;
                 goto out_unlock;
         case GETNCNT:
-                err = count_semncnt(sma, semnum);
+                err = count_semcnt(sma, semnum, 0);
                 goto out_unlock;
         case GETZCNT:
-                err = count_semzcnt(sma, semnum);
+                err = count_semcnt(sma, semnum, 1);
                 goto out_unlock;
         }