/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Peter McIlroy.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)merge.c	8.2 (Berkeley) 2/14/94";
#endif /* LIBC_SCCS and not lint */

/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about .3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */

#define NATURAL
#define THRESHOLD 16	/* Best choice for natural merge cut-off. */

/* #define NATURAL to get hybrid natural merge.
 * (The default is pairwise merging.)
 */

#include <sys/types.h>
#include <stdint.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>

typedef int (*cmp_t)(const void *, const void *);
#define CMP(x, y) cmp(x, y)

static void setup(u_char *, u_char *, size_t, size_t, cmp_t);
static void insertionsort(u_char *, size_t, size_t, cmp_t);

#define ISIZE sizeof(int)
#define PSIZE sizeof(u_char *)

/*
 * Bulk copy helpers.  The I* variants move int-sized words and are used
 * only when iflag is set (element size and base address are both
 * multiples of ISIZE); the C* variants move single bytes.  *_LIST copies
 * from src until src reaches last; *_ELT copies exactly i bytes (i must
 * be a nonzero multiple of ISIZE for ICOPY_ELT).  All four advance
 * src/dst — and *_ELT consumes i down to 0 — as side effects, so every
 * argument must be a plain lvalue with no side effects of its own.
 */
#define ICOPY_LIST(src, dst, last)				\
	do							\
	*(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE;	\
	while(src < last)
#define ICOPY_ELT(src, dst, i)					\
	do							\
	*(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE;	\
	while (i -= ISIZE)

#define CCOPY_LIST(src, dst, last)		\
	do					\
		*dst++ = *src++;		\
	while (src < last)
#define CCOPY_ELT(src, dst, i)			\
	do					\
		*dst++ = *src++;		\
	while (i -= 1)

/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.
 *
 * EVAL(p) rounds p up to the next pointer-aligned address; the list2
 * scratch buffer stores a linked list of run boundaries at these
 * addresses (one pointer slot fits because size >= PSIZE/2, checked in
 * mergesort(), so every run of two or more elements spans a full slot).
 */
/* Assumption: PSIZE is a power of 2. */
#define roundup2(x, y)	(((x) + ((y) - 1)) & (~((y) - 1)))
#define EVAL(p) (u_char **)roundup2((uintptr_t)p, PSIZE)

/*
 * Arguments are as for qsort.
 *
 * Returns 0 on success; -1 with errno set on failure (EINVAL when the
 * element size is too small to hold the run-list pointers, or malloc's
 * errno on allocation failure).  The merge is stable: when heads
 * compare equal, CMP(f1, f2) <= 0 selects the element from the
 * earlier run.
 *
 * Overall shape: setup() leaves list1 partitioned into sorted runs and
 * threads a linked list of run-end offsets through list2 (via EVAL).
 * Each pass of the outer while loop merges adjacent run pairs from
 * list1 into list2, rebuilding the run list as it goes, then swaps the
 * two buffers; it terminates when the first run covers the whole array.
 */
int
mergesort(void *base, size_t nmemb, size_t size, cmp_t cmp)
{
	size_t i;
	int sense;
	int big, iflag;
	u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
	u_char *list2, *list1, *p2, *p, *last, **p1;

	if (size < PSIZE / 2) {		/* Pointers must fit into 2 * size. */
		errno = EINVAL;
		return (-1);
	}

	if (nmemb == 0)
		return (0);

	/*
	 * iflag selects the word-at-a-time copy macros when both the
	 * element size and the base address are int-aligned.
	 * NOTE(review): (char *)base - (char *)0 is a historical
	 * address-alignment test — formally undefined pointer
	 * arithmetic, but it works on the flat-memory targets libc
	 * supports; confirm before porting.
	 */
	iflag = 0;
	if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
		iflag = 1;

	/* Extra PSIZE so EVAL() may round past the last element. */
	if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
		return (-1);

	list1 = base;
	setup(list1, list2, nmemb, size, cmp);
	last = list2 + nmemb * size;
	i = big = 0;
	/* One merge pass per iteration; done when run 0 spans the array. */
	while (*EVAL(list2) != last) {
	    l2 = list1;
	    p1 = EVAL(list1);
	    /*
	     * p2 walks the run list in the source buffer; [f1,l1) and
	     * [f2,l2) are the current pair of runs in list1; tp2 is the
	     * write cursor in the destination buffer.
	     */
	    for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
		p2 = *EVAL(p2);
		f1 = l2;
		f2 = l1 = list1 + (p2 - list2);
		if (p2 != last)
			p2 = *EVAL(p2);
		l2 = list1 + (p2 - list2);
		while (f1 < l1 && f2 < l2) {
			/*
			 * q is the head of the losing run; b walks the
			 * winning run to find how many of its elements
			 * precede q.  sense makes the boundary test
			 * "<= q" when walking f1's run (sense = -1) but
			 * "< q" when walking f2's run (sense = 0), so
			 * equal elements always come from f1 first —
			 * this is what keeps the sort stable.
			 */
			if (CMP(f1, f2) <= 0) {
				q = f2;
				b = f1, t = l1;
				sense = -1;
			} else {
				q = f1;
				b = f2, t = l2;
				sense = 0;
			}
			if (!big) {	/* here i = 0 */
				/*
				 * Linear search; after 6 consecutive
				 * steps assume long runs and switch to
				 * the exponential search below.
				 */
				while ((b += size) < t && CMP(q, b) >sense)
					if (++i == 6) {
						big = 1;
						goto EXPONENTIAL;
					}
			} else {
				/*
				 * Galloping: double the stride i until
				 * we overshoot (then binary-search the
				 * remaining interval), or, if the very
				 * first probe already passed q, drop
				 * back to linear mode and bisect the
				 * stride back down (FASTCASE).
				 */
EXPONENTIAL:			for (i = size; ; i <<= 1)
					if ((p = (b + i)) >= t) {
						if ((p = t - size) > b &&
						    CMP(q, p) <= sense)
							t = p;
						else
							b = p;
						break;
					} else if (CMP(q, p) <= sense) {
						t = p;
						if (i == size)
							big = 0;
						goto FASTCASE;
					} else
						b = p;
				/* Binary search between b and t. */
				while (t > b+size) {
					i = (((t - b) / size) >> 1) * size;
					if (CMP(q, p = b + i) <= sense)
						t = p;
					else
						b = p;
				}
				goto COPY;
FASTCASE:			while (i > size)
					if (CMP(q,
						p = b + (i >>= 1)) <= sense)
						t = p;
					else
						b = p;
COPY:				b = t;
			}
			/*
			 * Copy the winning prefix up to b, then one
			 * element (i = size bytes) of the losing run.
			 * The *_ELT macros consume i back to 0, which
			 * restores the "here i = 0" invariant that the
			 * linear branch above relies on.
			 */
			i = size;
			if (q == f1) {
				if (iflag) {
					ICOPY_LIST(f2, tp2, b);
					ICOPY_ELT(f1, tp2, i);
				} else {
					CCOPY_LIST(f2, tp2, b);
					CCOPY_ELT(f1, tp2, i);
				}
			} else {
				if (iflag) {
					ICOPY_LIST(f1, tp2, b);
					ICOPY_ELT(f2, tp2, i);
				} else {
					CCOPY_LIST(f1, tp2, b);
					CCOPY_ELT(f2, tp2, i);
				}
			}
		}
		/* One run is exhausted; flush the tail of the other. */
		if (f2 < l2) {
			if (iflag)
				ICOPY_LIST(f2, tp2, l2);
			else
				CCOPY_LIST(f2, tp2, l2);
		} else if (f1 < l1) {
			if (iflag)
				ICOPY_LIST(f1, tp2, l1);
			else
				CCOPY_LIST(f1, tp2, l1);
		}
		/* Record the merged run's end in the output run list. */
		*p1 = l2;
	    }
	    tp2 = list1;	/* swap list1, list2 */
	    list1 = list2;
	    list2 = tp2;
	    last = list2 + nmemb*size;
	}
	/*
	 * After an odd number of passes the sorted data sits in the
	 * scratch buffer; move it back into the caller's array.
	 */
	if (base == list2) {
		memmove(list2, list1, nmemb*size);
		list2 = list1;
	}
	free(list2);
	return (0);
}

/*
 * Byte-wise element swap and run reversal.  Both macros read element
 * geometry from, and clobber, the ENCLOSING function's locals: s, i and
 * tmp (and reverse also size2).  They are usable only inside setup()
 * and insertionsort(), which declare those locals for this purpose.
 * swap() exchanges the size-byte elements at a and b; reverse()
 * reverses the elements in [bot, top].
 */
#define swap(a, b) { \
		s = b; \
		i = size; \
		do { \
			tmp = *a; *a++ = *s; *s++ = tmp; \
		} while (--i); \
		a -= size; \
	}
#define reverse(bot, top) { \
	s = top; \
	do { \
		i = size; \
		do { \
			tmp = *bot; *bot++ = *s; *s++ = tmp; \
		} while (--i); \
		s -= size2; \
	} while(bot < s); \
}

/*
 * Optional hybrid natural/pairwise first pass.  Eats up list1 in runs of
 * increasing order, list2 in a corresponding linked list.  Checks for runs
 * when THRESHOLD/2 pairs compare with same sense.  (Only used when NATURAL
 * is defined.  Otherwise simple pairwise merging is used.)
 *
 * On return every run boundary offset k has *EVAL(list2 + k) pointing at
 * the next boundary; the final link points at list2 + n*size.
 */
static
void
setup(u_char *list1, u_char *list2, size_t n, size_t size, cmp_t cmp)
{
	int i, length, size2, tmp, sense;	/* s/i/tmp feed swap()/reverse() */
	u_char *f1, *f2, *s, *l2, *last, *p2;

	size2 = size*2;
	if (n <= 5) {
		/* Tiny input: one insertion sort, one run covering it all. */
		insertionsort(list1, n, size, cmp);
		*EVAL(list2) = (u_char*) list2 + n*size;
		return;
	}
	/*
	 * Avoid running pointers out of bounds; limit n to evens
	 * for simplicity.
	 */
	i = 4 + (n & 1);
	insertionsort(list1 + (n - i) * size, i, size, cmp);
	last = list1 + size * (n - i);
	*EVAL(list2 + (last - list1)) = list2 + n * size;

#ifdef NATURAL
	p2 = list2;
	f1 = list1;
	/* sense: 1 while adjacent pairs descend, 0 while they ascend. */
	sense = (CMP(f1, f1 + size) > 0);
	for (; f1 < last; sense = !sense) {
		length = 2;
		/* Find pairs with same sense. */
		for (f2 = f1 + size2; f2 < last; f2 += size2) {
			if ((CMP(f2, f2+ size) > 0) != sense)
				break;
			length += 2;
		}
		if (length < THRESHOLD) {		/* Pairwise merge */
			/*
			 * Short stretch: emit 2-element runs, swapping
			 * each strictly-descending pair into order
			 * (strict > keeps equal elements stable).
			 */
			do {
				p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
				if (sense > 0)
					swap (f1, f1 + size);
			} while ((f1 += size2) < f2);
		} else {				/* Natural merge */
			/*
			 * Long monotone stretch: split it at the points
			 * where the pair-to-pair order breaks, reversing
			 * descending runs in place so every emitted run
			 * is ascending.
			 */
			l2 = f2;
			for (f2 = f1 + size2; f2 < l2; f2 += size2) {
				if ((CMP(f2-size, f2) > 0) != sense) {
					p2 = *EVAL(p2) = f2 - list1 + list2;
					if (sense > 0)
						reverse(f1, f2-size);
					f1 = f2;
				}
			}
			if (sense > 0)
				reverse (f1, f2-size);
			f1 = f2;
			/*
			 * If the run happens to continue into the
			 * insertion-sorted tail, fuse them into one run
			 * ending at n*size; otherwise just link to the
			 * tail's boundary.
			 */
			if (f2 < last || CMP(f2 - size, f2) > 0)
				p2 = *EVAL(p2) = f2 - list1 + list2;
			else
				p2 = *EVAL(p2) = list2 + n*size;
		}
	}
#else /* pairwise merge only. */
	for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
		p2 = *EVAL(p2) = p2 + size2;
		if (CMP (f1, f1 + size) > 0)
			swap(f1, f1 + size);
	}
#endif /* NATURAL */
}

/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last 4 elements.
 *
 * Plain byte-wise insertion sort of n elements of the given size.
 * Callers guarantee n >= 1: n is a size_t, so the --n in the loop
 * condition would wrap around for n == 0.  The locals s, tmp and i
 * exist for the swap() macro above, which expands in terms of them.
 */
static void
insertionsort(u_char *a, size_t n, size_t size, cmp_t cmp)
{
	u_char *ai, *s, *t, *u, tmp;
	int i;

	for (ai = a+size; --n >= 1; ai += size)
		for (t = ai; t > a; t -= size) {
			u = t - size;
			if (CMP(u, t) <= 0)
				break;
			swap(u, t);
		}
}