/*
 * Commit 272a8d2c, authored by Nathan Scott:
 * [XFS] Switch to using the BSD qsort implementation.
 * SGI Modid: 2.5.x-xfs:slinx:162158a
 * Parent: ab5d6be1
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
/* Byte-wise swap two items of size SIZE. */ /*
#define SWAP(a, b, size) \ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
do \ */
{ \ #define swapcode(TYPE, parmi, parmj, n) { \
register size_t __size = (size); \ long i = (n) / sizeof (TYPE); \
register char *__a = (a), *__b = (b); \ register TYPE *pi = (TYPE *) (parmi); \
do \ register TYPE *pj = (TYPE *) (parmj); \
{ \ do { \
char __tmp = *__a; \ register TYPE t = *pi; \
*__a++ = *__b; \ *pi++ = *pj; \
*__b++ = __tmp; \ *pj++ = t; \
} while (--__size > 0); \ } while (--i > 0); \
} while (0) }
/* Discontinue quicksort algorithm when partition gets below this size.
This particular magic number was chosen to work best on a Sun 4/260. */
#define MAX_THRESH 4
/* Stack node declarations used to store unfulfilled partition obligations. */
typedef struct
{
char *lo;
char *hi;
} stack_node;
/* The next 4 #defines implement a very fast in-line stack abstraction. */
/* The stack needs log (total_elements) entries (we could even subtract
log(MAX_THRESH)). Since total_elements has type size_t, we get as
upper bound for log (total_elements):
bits per byte (CHAR_BIT) * sizeof(size_t). */
#define STACK_SIZE (8 * sizeof(unsigned long int))
#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi)))
#define STACK_NOT_EMPTY (stack < top)
/* Order size using quicksort. This implementation incorporates #define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
four optimizations discussed in Sedgewick: es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
1. Non-recursive, using an explicit stack of pointer that store the static __inline void
next array partition to sort. To save time, this maximum amount swapfunc(char *a, char *b, int n, int swaptype)
of space required to store an array of SIZE_MAX is allocated on the {
stack. Assuming a 32-bit (64 bit) integer for size_t, this needs if (swaptype <= 1)
only 32 * sizeof(stack_node) == 256 bytes (for 64 bit: 1024 bytes). swapcode(long, a, b, n)
Pretty cheap, actually. else
swapcode(char, a, b, n)
}
2. Chose the pivot element using a median-of-three decision tree. #define swap(a, b) \
This reduces the probability of selecting a bad pivot value and if (swaptype == 0) { \
eliminates certain extraneous comparisons. long t = *(long *)(a); \
*(long *)(a) = *(long *)(b); \
*(long *)(b) = t; \
} else \
swapfunc(a, b, es, swaptype)
3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving #define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
insertion sort to order the MAX_THRESH items within each partition.
This is a big win, since insertion sort is faster for small, mostly
sorted array segments.
4. The larger of the two sub-partitions is always pushed onto the static __inline char *
stack first, with the algorithm then concentrating on the med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *))
smaller partition. This *guarantees* no more than log (total_elems) {
stack size is needed (actually O(1) in this case)! */ return cmp(a, b) < 0 ?
(cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a ))
:(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c ));
}
void void
qsort (void *const pbase, size_t total_elems, size_t size, qsort(void *aa, size_t n, size_t es, int (*cmp)(const void *, const void *))
int (*cmp)(const void *, const void *))
{ {
register char *base_ptr = (char *) pbase; char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
int d, r, swaptype, swap_cnt;
const size_t max_thresh = MAX_THRESH * size; register char *a = aa;
if (total_elems == 0) loop: SWAPINIT(a, es);
/* Avoid lossage with unsigned arithmetic below. */ swap_cnt = 0;
if (n < 7) {
for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es)
for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
pl -= es)
swap(pl, pl - es);
return; return;
if (total_elems > MAX_THRESH)
{
char *lo = base_ptr;
char *hi = &lo[size * (total_elems - 1)];
stack_node stack[STACK_SIZE];
stack_node *top = stack + 1;
while (STACK_NOT_EMPTY)
{
char *left_ptr;
char *right_ptr;
/* Select median value from among LO, MID, and HI. Rearrange
LO and HI so the three values are sorted. This lowers the
probability of picking a pathological pivot value and
skips a comparison for both the LEFT_PTR and RIGHT_PTR in
the while loops. */
char *mid = lo + size * ((hi - lo) / size >> 1);
if ((*cmp) ((void *) mid, (void *) lo) < 0)
SWAP (mid, lo, size);
if ((*cmp) ((void *) hi, (void *) mid) < 0)
SWAP (mid, hi, size);
else
goto jump_over;
if ((*cmp) ((void *) mid, (void *) lo) < 0)
SWAP (mid, lo, size);
jump_over:;
left_ptr = lo + size;
right_ptr = hi - size;
/* Here's the famous ``collapse the walls'' section of quicksort.
Gotta like those tight inner loops! They are the main reason
that this algorithm runs much faster than others. */
do
{
while ((*cmp) ((void *) left_ptr, (void *) mid) < 0)
left_ptr += size;
while ((*cmp) ((void *) mid, (void *) right_ptr) < 0)
right_ptr -= size;
if (left_ptr < right_ptr)
{
SWAP (left_ptr, right_ptr, size);
if (mid == left_ptr)
mid = right_ptr;
else if (mid == right_ptr)
mid = left_ptr;
left_ptr += size;
right_ptr -= size;
} }
else if (left_ptr == right_ptr) pm = (char *)a + (n / 2) * es;
{ if (n > 7) {
left_ptr += size; pl = (char *)a;
right_ptr -= size; pn = (char *)a + (n - 1) * es;
break; if (n > 40) {
d = (n / 8) * es;
pl = med3(pl, pl + d, pl + 2 * d, cmp);
pm = med3(pm - d, pm, pm + d, cmp);
pn = med3(pn - 2 * d, pn - d, pn, cmp);
} }
pm = med3(pl, pm, pn, cmp);
} }
while (left_ptr <= right_ptr); swap(a, pm);
pa = pb = (char *)a + es;
/* Set up pointers for next iteration. First determine whether
left and right partitions are below the threshold size. If so, pc = pd = (char *)a + (n - 1) * es;
ignore one or both. Otherwise, push the larger partition's for (;;) {
bounds on the stack and continue sorting the smaller one. */ while (pb <= pc && (r = cmp(pb, a)) <= 0) {
if (r == 0) {
if ((size_t) (right_ptr - lo) <= max_thresh) swap_cnt = 1;
{ swap(pa, pb);
if ((size_t) (hi - left_ptr) <= max_thresh) pa += es;
/* Ignore both small partitions. */
POP (lo, hi);
else
/* Ignore small left partition. */
lo = left_ptr;
}
else if ((size_t) (hi - left_ptr) <= max_thresh)
/* Ignore small right partition. */
hi = right_ptr;
else if ((right_ptr - lo) > (hi - left_ptr))
{
/* Push larger left partition indices. */
PUSH (lo, right_ptr);
lo = left_ptr;
} }
else pb += es;
{
/* Push larger right partition indices. */
PUSH (left_ptr, hi);
hi = right_ptr;
} }
while (pb <= pc && (r = cmp(pc, a)) >= 0) {
if (r == 0) {
swap_cnt = 1;
swap(pc, pd);
pd -= es;
} }
pc -= es;
} }
if (pb > pc)
/* Once the BASE_PTR array is partially sorted by quicksort the rest break;
is completely sorted using insertion sort, since this is efficient swap(pb, pc);
for partitions below MAX_THRESH size. BASE_PTR points to the beginning swap_cnt = 1;
of the array to sort, and END_PTR points at the very last element in pb += es;
the array (*not* one beyond it!). */ pc -= es;
{
char *const end_ptr = &base_ptr[size * (total_elems - 1)];
char *tmp_ptr = base_ptr;
char *const thresh = min_t(char *const, end_ptr, base_ptr + max_thresh);
register char *run_ptr;
/* Find smallest element in first threshold and place it at the
array's beginning. This is the smallest array element,
and the operation speeds up insertion sort's inner loop. */
for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size)
if ((*cmp) ((void *) run_ptr, (void *) tmp_ptr) < 0)
tmp_ptr = run_ptr;
if (tmp_ptr != base_ptr)
SWAP (tmp_ptr, base_ptr, size);
/* Insertion sort, running from left-hand-side up to right-hand-side. */
run_ptr = base_ptr + size;
while ((run_ptr += size) <= end_ptr)
{
tmp_ptr = run_ptr - size;
while ((*cmp) ((void *) run_ptr, (void *) tmp_ptr) < 0)
tmp_ptr -= size;
tmp_ptr += size;
if (tmp_ptr != run_ptr)
{
char *trav;
trav = run_ptr + size;
while (--trav >= run_ptr)
{
char c = *trav;
char *hi, *lo;
for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo)
*hi = *lo;
*hi = c;
}
} }
if (swap_cnt == 0) { /* Switch to insertion sort */
for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
pl -= es)
swap(pl, pl - es);
return;
} }
pn = (char *)a + n * es;
r = min(pa - (char *)a, pb - pa);
vecswap(a, pb - r, r);
r = min((long)(pd - pc), (long)(pn - pd - es));
vecswap(pb, pn - r, r);
if ((r = pb - pa) > es)
qsort(a, r / es, es, cmp);
if ((r = pd - pc) > es) {
/* Iterate rather than recurse to save stack space */
a = pn - r;
n = r / es;
goto loop;
} }
/* qsort(pn - r, r / es, es, cmp);*/
} }