/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/* NOTE: This is an internal header file, included by other STL headers.
 *   You should not attempt to use it directly.
 */

#ifndef __SGI_STL_INTERNAL_ALLOC_H
#define __SGI_STL_INTERNAL_ALLOC_H

#ifdef __SUNPRO_CC
#  define __PRIVATE public
   // Extra access restrictions prevent us from really making some things
   // private.
#else
#  define __PRIVATE private
#endif

#ifdef __STL_STATIC_TEMPLATE_MEMBER_BUG
#  define __USE_MALLOC
#endif

// This implements some standard node allocators.  These are
// NOT the same as the allocators in the C++ draft standard or in
// the original STL.  They do not encapsulate different pointer
// types; indeed we assume that there is only one pointer type.
// The allocation primitives are intended to allocate individual objects,
// not larger arenas as with the original STL allocators.

#if 0
#   include <new>
#   define __THROW_BAD_ALLOC throw bad_alloc()
#elif !defined(__THROW_BAD_ALLOC)
#if (defined(__BEOS__) || defined(__HAIKU__))
#   include <stdio.h>
#   define __THROW_BAD_ALLOC fprintf(stderr, "out of memory\n"); exit(1)
#else
#   include <iostream.h>
#   define __THROW_BAD_ALLOC cerr << "out of memory" << endl; exit(1)
#endif
#endif

#ifdef __STL_WIN32THREADS
#   include <windows.h>
#endif

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#ifndef __RESTRICT
#  define __RESTRICT
#endif

#if !defined(__STL_PTHREADS) && !defined(__STL_SOLTHREADS) \
 && !defined(_NOTHREADS) \
 && !defined(__STL_SGI_THREADS) && !defined(__STL_WIN32THREADS)
#   define _NOTHREADS
#endif

# ifdef __STL_PTHREADS
    // POSIX Threads
    // This is dubious, since this is likely to be a high contention
    // lock.  Performance may not be adequate.
#   include <pthread.h>
#   define __NODE_ALLOCATOR_LOCK \
        if (threads) pthread_mutex_lock(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_UNLOCK \
        if (threads) pthread_mutex_unlock(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE volatile  // Needed at -O3 on SGI
# endif
# ifdef __STL_SOLTHREADS
#   include <thread.h>
#   define __NODE_ALLOCATOR_LOCK \
        if (threads) mutex_lock(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_UNLOCK \
        if (threads) mutex_unlock(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE
# endif
# ifdef __STL_WIN32THREADS
    // The lock needs to be initialized by constructing an allocator
    // object of the right type.  We do that here explicitly for alloc.
#   define __NODE_ALLOCATOR_LOCK \
        EnterCriticalSection(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_UNLOCK \
        LeaveCriticalSection(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE volatile  // may not be needed
# endif /* WIN32THREADS */
# ifdef __STL_SGI_THREADS
    // This should work without threads, with sproc threads, or with
    // pthreads.  It is suboptimal in all cases.
    // It is unlikely to even compile on non-SGI machines.

    extern "C" {
      extern int __us_rsthread_malloc;
    }
    // The above is copied from malloc.h.  Including <malloc.h>
    // would be cleaner but fails with certain levels of standard
    // conformance.
#   define __NODE_ALLOCATOR_LOCK if (threads && __us_rsthread_malloc) \
                { _S_lock(&_S_node_allocator_lock); }
#   define __NODE_ALLOCATOR_UNLOCK if (threads && __us_rsthread_malloc) \
                { _S_unlock(&_S_node_allocator_lock); }
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE volatile  // Needed at -O3 on SGI
# endif
# ifdef _NOTHREADS
//  Thread-unsafe
#   define __NODE_ALLOCATOR_LOCK
#   define __NODE_ALLOCATOR_UNLOCK
#   define __NODE_ALLOCATOR_THREADS false
#   define __VOLATILE
# endif
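// A minimal sketch of how the macros above are consumed.  They feed the
// _Lock helper class defined inside __default_alloc_template further below;
// assuming __STL_PTHREADS is defined, that helper expands to roughly:
//
//   class _Lock {
//   public:
//     _Lock()  { if (threads) pthread_mutex_lock(&_S_node_allocator_lock); }
//     ~_Lock() { if (threads) pthread_mutex_unlock(&_S_node_allocator_lock); }
//   };
//
// If no threading macro is defined, _NOTHREADS is selected and both lock
// macros expand to nothing, so the helper is a no-op.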

__STL_BEGIN_NAMESPACE

#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
#pragma set woff 1174
#endif

// Malloc-based allocator.  Typically slower than default alloc below.
// Typically thread-safe and more storage efficient.
#ifdef __STL_STATIC_TEMPLATE_MEMBER_BUG
# ifdef __DECLARE_GLOBALS_HERE
    void (* __malloc_alloc_oom_handler)() = 0;
    // g++ 2.7.2 does not handle static template data members.
# else
    extern void (* __malloc_alloc_oom_handler)();
# endif
#endif

template <int __inst>
class __malloc_alloc_template {

private:

  static void* _S_oom_malloc(size_t);
  static void* _S_oom_realloc(void*, size_t);

#ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
  static void (* __malloc_alloc_oom_handler)();
#endif

public:

  static void* allocate(size_t __n)
  {
    void* __result = malloc(__n);
    if (0 == __result) __result = _S_oom_malloc(__n);
    return __result;
  }

  static void deallocate(void* __p, size_t /* __n */)
  {
    free(__p);
  }

  static void* reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
  {
    void* __result = realloc(__p, __new_sz);
    if (0 == __result) __result = _S_oom_realloc(__p, __new_sz);
    return __result;
  }

  static void (* __set_malloc_handler(void (*__f)()))()
  {
    void (* __old)() = __malloc_alloc_oom_handler;
    __malloc_alloc_oom_handler = __f;
    return(__old);
  }

};

// malloc_alloc out-of-memory handling

#ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
template <int __inst>
void (* __malloc_alloc_template<__inst>::__malloc_alloc_oom_handler)() = 0;
#endif

template <int __inst>
void*
__malloc_alloc_template<__inst>::_S_oom_malloc(size_t __n)
{
    void (* __my_malloc_handler)();
    void* __result;

    for (;;) {
        __my_malloc_handler = __malloc_alloc_oom_handler;
        if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
        (*__my_malloc_handler)();
        __result = malloc(__n);
        if (__result) return(__result);
    }
}

template <int __inst>
void* __malloc_alloc_template<__inst>::_S_oom_realloc(void* __p, size_t __n)
{
    void (* __my_malloc_handler)();
    void* __result;

    for (;;) {
        __my_malloc_handler = __malloc_alloc_oom_handler;
        if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
        (*__my_malloc_handler)();
        __result = realloc(__p, __n);
        if (__result) return(__result);
    }
}

typedef __malloc_alloc_template<0> malloc_alloc;
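// A minimal usage sketch of the out-of-memory hook (the handler name
// __release_cached_memory is purely illustrative).  malloc_alloc retries
// malloc() after each call to the installed handler, and only gives up --
// via __THROW_BAD_ALLOC -- when no handler is installed.
//
//   void __release_cached_memory();   // frees memory held elsewhere
//
//   void (*__old)() =
//       malloc_alloc::__set_malloc_handler(__release_cached_memory);
//   void* __buf = malloc_alloc::allocate(1024);  // falls back to the handler
//                                                // on malloc failure
//   malloc_alloc::deallocate(__buf, 1024);
//   malloc_alloc::__set_malloc_handler(__old);   // restore previous handler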

template<class _Tp, class _Alloc>
class simple_alloc {

public:
    static _Tp* allocate(size_t __n)
      { return 0 == __n ? 0 : (_Tp*) _Alloc::allocate(__n * sizeof (_Tp)); }
    static _Tp* allocate(void)
      { return (_Tp*) _Alloc::allocate(sizeof (_Tp)); }
    static void deallocate(_Tp* __p, size_t __n)
      { if (0 != __n) _Alloc::deallocate(__p, __n * sizeof (_Tp)); }
    static void deallocate(_Tp* __p)
      { _Alloc::deallocate(__p, sizeof (_Tp)); }
};
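// simple_alloc is the thin, typed wrapper the SGI containers use internally:
// it converts an element count into a byte count and forwards to the
// underlying allocator.  A minimal sketch, assuming the default alloc
// defined later in this file:
//
//   typedef simple_alloc<int, alloc> _Int_alloc;
//   int* __v = _Int_alloc::allocate(16);   // 16 * sizeof(int) bytes
//   /* ... */
//   _Int_alloc::deallocate(__v, 16);       // must pass the same count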

// Allocator adaptor to check size arguments for debugging.
// Reports errors using assert.  Checking can be disabled with
// NDEBUG, but it's far better to just use the underlying allocator
// instead when no checking is desired.
// There is some evidence that this can confuse Purify.
template <class _Alloc>
class debug_alloc {

private:

  enum {_S_extra = 8};  // Size of space used to store size.  Note
                        // that this must be large enough to preserve
                        // alignment.

public:

  static void* allocate(size_t __n)
  {
    char* __result = (char*)_Alloc::allocate(__n + _S_extra);
    *(size_t*)__result = __n;
    return __result + _S_extra;
  }

  static void deallocate(void* __p, size_t __n)
  {
    char* __real_p = (char*)__p - _S_extra;
    assert(*(size_t*)__real_p == __n);
    _Alloc::deallocate(__real_p, __n + _S_extra);
  }

  static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz)
  {
    char* __real_p = (char*)__p - _S_extra;
    assert(*(size_t*)__real_p == __old_sz);
    char* __result = (char*)
      _Alloc::reallocate(__real_p, __old_sz + _S_extra, __new_sz + _S_extra);
    *(size_t*)__result = __new_sz;
    return __result + _S_extra;
  }

};


# ifdef __USE_MALLOC

typedef malloc_alloc alloc;
typedef malloc_alloc single_client_alloc;

# else


// Default node allocator.
// With a reasonable compiler, this should be roughly as fast as the
// original STL class-specific allocators, but with less fragmentation.
// Default_alloc_template parameters are experimental and MAY
// DISAPPEAR in the future.  Clients should just use alloc for now.
//
// Important implementation properties:
// 1. If the client requests an object of size > _MAX_BYTES, the resulting
//    object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
//    _S_round_up(requested_size).  Thus the client has enough size
//    information that we can return the object to the proper free list
//    without permanently losing part of the object.
//

// The first template parameter specifies whether more than one thread
// may use this allocator.  It is safe to allocate an object from
// one instance of a default_alloc and deallocate it with another
// one.  This effectively transfers its ownership to the second one.
// This may have undesirable effects on reference locality.
// The second parameter is unreferenced and serves only to allow the
// creation of multiple default_alloc instances.
// Note that containers built on different allocator instances have
// different types, limiting the utility of this approach.
#ifdef __SUNPRO_CC
// breaks if we make these template class members:
  enum {_ALIGN = 8};
  enum {_MAX_BYTES = 128};
  enum {_NFREELISTS = _MAX_BYTES/_ALIGN};
#endif

template <bool threads, int inst>
class __default_alloc_template {

private:
  // Really we should use static const int x = N
  // instead of enum { x = N }, but few compilers accept the former.
# ifndef __SUNPRO_CC
    enum {_ALIGN = 8};
    enum {_MAX_BYTES = 128};
    enum {_NFREELISTS = _MAX_BYTES/_ALIGN};
# endif
  static size_t
  _S_round_up(size_t __bytes)
    { return (((__bytes) + _ALIGN-1) & ~(_ALIGN - 1)); }

__PRIVATE:
  union _Obj {
        union _Obj* _M_free_list_link;
        char _M_client_data[1];    /* The client sees this. */
  };
private:
# ifdef __SUNPRO_CC
    static _Obj* __VOLATILE _S_free_list[];
        // Specifying a size results in duplicate def for 4.1
# else
    static _Obj* __VOLATILE _S_free_list[_NFREELISTS];
# endif
  static  size_t _S_freelist_index(size_t __bytes) {
        return (((__bytes) + _ALIGN-1)/_ALIGN - 1);
  }
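  // A worked example of the two mappings above: a request for 20 bytes is
  // rounded up to 24 ((20 + 7) & ~7), and 24-byte objects live on free
  // list (20 + 7)/8 - 1 == 2; in general free list __i holds objects of
  // size (__i + 1) * _ALIGN.  Requests larger than _MAX_BYTES (128) never
  // reach the free lists and go straight to malloc_alloc.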

  // Returns an object of size __n, and optionally adds additional objects
  // of size __n to the free list.
  static void* _S_refill(size_t __n);
  // Allocates a chunk for nobjs of size "size".  nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char* _S_chunk_alloc(size_t __size, int& __nobjs);

  // Chunk allocation state.
  static char* _S_start_free;
  static char* _S_end_free;
  static size_t _S_heap_size;

# ifdef __STL_SGI_THREADS
    static volatile unsigned long _S_node_allocator_lock;
    static void _S_lock(volatile unsigned long*);
    static inline void _S_unlock(volatile unsigned long*);
# endif

# ifdef __STL_PTHREADS
    static pthread_mutex_t _S_node_allocator_lock;
# endif

# ifdef __STL_SOLTHREADS
    static mutex_t _S_node_allocator_lock;
# endif

# ifdef __STL_WIN32THREADS
    static CRITICAL_SECTION _S_node_allocator_lock;
    static bool _S_node_allocator_lock_initialized;

  public:
    __default_alloc_template() {
        // This assumes the first constructor is called before threads
        // are started.
        if (!_S_node_allocator_lock_initialized) {
            InitializeCriticalSection(&_S_node_allocator_lock);
            _S_node_allocator_lock_initialized = true;
        }
    }
  private:
# endif

    class _Lock {
        public:
            _Lock() { __NODE_ALLOCATOR_LOCK; }
            ~_Lock() { __NODE_ALLOCATOR_UNLOCK; }
    };
    friend class _Lock;

public:

  /* __n must be > 0 */
  static void* allocate(size_t __n)
  {
    _Obj* __VOLATILE* __my_free_list;
    _Obj* __RESTRICT __result;

    if (__n > (size_t) _MAX_BYTES) {
        return(malloc_alloc::allocate(__n));
    }
    __my_free_list = _S_free_list + _S_freelist_index(__n);
    // Acquire the lock here with a constructor call.
    // This ensures that it is released in exit or during stack
    // unwinding.
#   ifndef _NOTHREADS
        /*REFERENCED*/
        _Lock __lock_instance;
#   endif
    __result = *__my_free_list;
    if (__result == 0) {
        void* __r = _S_refill(_S_round_up(__n));
        return __r;
    }
    *__my_free_list = __result -> _M_free_list_link;
    return (__result);
  };

  /* __p may not be 0 */
  static void deallocate(void* __p, size_t __n)
  {
    _Obj* __q = (_Obj*)__p;
    _Obj* __VOLATILE* __my_free_list;

    if (__n > (size_t) _MAX_BYTES) {
        malloc_alloc::deallocate(__p, __n);
        return;
    }
    __my_free_list = _S_free_list + _S_freelist_index(__n);
    // acquire lock
#   ifndef _NOTHREADS
        /*REFERENCED*/
        _Lock __lock_instance;
#   endif /* _NOTHREADS */
    __q -> _M_free_list_link = *__my_free_list;
    *__my_free_list = __q;
    // lock is released here
  }

  static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz);

};

typedef __default_alloc_template<__NODE_ALLOCATOR_THREADS, 0> alloc;
typedef __default_alloc_template<false, 0> single_client_alloc;
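// A minimal usage sketch: the size passed to deallocate() must be the size
// that was passed to allocate(), because it is the only thing that
// identifies which free list the object belongs to.
//
//   void* __p = alloc::allocate(40);    // served from the 40-byte free list
//   alloc::deallocate(__p, 40);         // returned to the same list
//
//   void* __q = alloc::allocate(200);   // > _MAX_BYTES: forwarded to malloc
//   alloc::deallocate(__q, 200);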


/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
/* We hold the allocation lock.                                         */
template <bool __threads, int __inst>
char*
__default_alloc_template<__threads, __inst>::_S_chunk_alloc(size_t __size,
                                                            int& __nobjs)
{
    char* __result;
    size_t __total_bytes = __size * __nobjs;
    size_t __bytes_left = _S_end_free - _S_start_free;

    if (__bytes_left >= __total_bytes) {
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return(__result);
    } else if (__bytes_left >= __size) {
        __nobjs = (int)(__bytes_left/__size);
        __total_bytes = __size * __nobjs;
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return(__result);
    } else {
        size_t __bytes_to_get =
          2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
        // Try to make use of the left-over piece.
        if (__bytes_left > 0) {
            _Obj* __VOLATILE* __my_free_list =
                        _S_free_list + _S_freelist_index(__bytes_left);

            ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
            *__my_free_list = (_Obj*)_S_start_free;
        }
        _S_start_free = (char*)malloc(__bytes_to_get);
        if (0 == _S_start_free) {
            size_t __i;
            _Obj* __VOLATILE* __my_free_list;
            _Obj* __p;
            // Try to make do with what we have.  That can't
            // hurt.  We do not try smaller requests, since that tends
            // to result in disaster on multi-process machines.
            for (__i = __size; __i <= _MAX_BYTES; __i += _ALIGN) {
                __my_free_list = _S_free_list + _S_freelist_index(__i);
                __p = *__my_free_list;
                if (0 != __p) {
                    *__my_free_list = __p -> _M_free_list_link;
                    _S_start_free = (char*)__p;
                    _S_end_free = _S_start_free + __i;
                    return(_S_chunk_alloc(__size, __nobjs));
                    // Any leftover piece will eventually make it to the
                    // right free list.
                }
            }
            _S_end_free = 0;    // In case of exception.
            _S_start_free = (char*)malloc_alloc::allocate(__bytes_to_get);
            // This should either throw an
            // exception or remedy the situation.  Thus we assume it
            // succeeded.
        }
        _S_heap_size += __bytes_to_get;
        _S_end_free = _S_start_free + __bytes_to_get;
        return(_S_chunk_alloc(__size, __nobjs));
    }
}
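// A worked example of the growth policy above, assuming an empty pool and
// _S_heap_size == 0: a refill for 32-byte objects asks malloc for
// 2 * (20 * 32) + _S_round_up(0 >> 4) == 1280 bytes, i.e. the 20 objects
// handed back to the caller plus another 20 objects' worth left between
// _S_start_free and _S_end_free for later requests.  As _S_heap_size grows,
// the _S_round_up(_S_heap_size >> 4) term makes each new chunk a bit larger.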


/* Returns an object of size __n, and optionally adds additional objects */
/* of size __n to the free list.                                         */
/* We assume that __n is properly aligned.                               */
/* We hold the allocation lock.                                          */
template <bool __threads, int __inst>
void*
__default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
{
    int __nobjs = 20;
    char* __chunk = _S_chunk_alloc(__n, __nobjs);
    _Obj* __VOLATILE* __my_free_list;
    _Obj* __result;
    _Obj* __current_obj;
    _Obj* __next_obj;
    int __i;

    if (1 == __nobjs) return(__chunk);
    __my_free_list = _S_free_list + _S_freelist_index(__n);

    /* Build free list in chunk */
    __result = (_Obj*)__chunk;
    *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
    for (__i = 1; ; __i++) {
        __current_obj = __next_obj;
        __next_obj = (_Obj*)((char*)__next_obj + __n);
        if (__nobjs - 1 == __i) {
            __current_obj -> _M_free_list_link = 0;
            break;
        } else {
            __current_obj -> _M_free_list_link = __next_obj;
        }
    }
    return(__result);
}
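// A small trace of the loop above for __n == 32 when _S_chunk_alloc delivers
// all 20 objects: the first 32-byte piece of the chunk is returned to the
// caller, and the remaining 19 pieces are threaded together through their
// _M_free_list_link fields and installed as the new head of the 32-byte
// free list.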

template <bool threads, int inst>
void*
__default_alloc_template<threads, inst>::reallocate(void* __p,
                                                    size_t __old_sz,
                                                    size_t __new_sz)
{
    void* __result;
    size_t __copy_sz;

    if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES) {
        return(realloc(__p, __new_sz));
    }
    if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
    __result = allocate(__new_sz);
    __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
    memcpy(__result, __p, __copy_sz);
    deallocate(__p, __old_sz);
    return(__result);
}

#ifdef __STL_PTHREADS
    template <bool __threads, int __inst>
    pthread_mutex_t
    __default_alloc_template<__threads, __inst>::_S_node_allocator_lock
        = PTHREAD_MUTEX_INITIALIZER;
#endif

#ifdef __STL_SOLTHREADS
    template <bool __threads, int __inst>
    mutex_t
    __default_alloc_template<__threads, __inst>::_S_node_allocator_lock
        = DEFAULTMUTEX;
#endif

#ifdef __STL_WIN32THREADS
    template <bool __threads, int __inst>
    CRITICAL_SECTION
    __default_alloc_template<__threads, __inst>::
      _S_node_allocator_lock;

    template <bool __threads, int __inst>
    bool
    __default_alloc_template<__threads, __inst>::
      _S_node_allocator_lock_initialized
        = false;
#endif

#ifdef __STL_SGI_THREADS
__STL_END_NAMESPACE
#include <mutex.h>
#include <time.h>  /* XXX should use <ctime> */
__STL_BEGIN_NAMESPACE
// Somewhat generic lock implementations.  We need only test-and-set
// and some way to sleep.  These should work with both SGI pthreads
// and sproc threads.  They may be useful on other systems.
template <bool __threads, int __inst>
volatile unsigned long
__default_alloc_template<__threads, __inst>::_S_node_allocator_lock = 0;

#if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) || defined(__GNUC__)
#   define __test_and_set(l,v) test_and_set(l,v)
#endif

template <bool __threads, int __inst>
void
__default_alloc_template<__threads, __inst>::
  _S_lock(volatile unsigned long* __lock)
{
    const unsigned __low_spin_max = 30;     // spins if we suspect uniprocessor
    const unsigned __high_spin_max = 1000;  // spins for multiprocessor
    static unsigned __spin_max = __low_spin_max;
    unsigned __my_spin_max;
    static unsigned __last_spins = 0;
    unsigned __my_last_spins;
    unsigned __junk;
#   define __ALLOC_PAUSE \
      __junk *= __junk; __junk *= __junk; __junk *= __junk; __junk *= __junk
    int __i;

    if (!__test_and_set((unsigned long*)__lock, 1)) {
        return;
    }
    __my_spin_max = __spin_max;
    __my_last_spins = __last_spins;
    for (__i = 0; __i < __my_spin_max; __i++) {
        if (__i < __my_last_spins/2 || *__lock) {
            __ALLOC_PAUSE;
            continue;
        }
        if (!__test_and_set((unsigned long*)__lock, 1)) {
            // got it!
            // Spinning worked.  Thus we're probably not being scheduled
            // against the other process with which we were contending.
            // Thus it makes sense to spin longer the next time.
            __last_spins = __i;
            __spin_max = __high_spin_max;
            return;
        }
    }
    // We are probably being scheduled against the other process.  Sleep.
    __spin_max = __low_spin_max;
    for (__i = 0 ;; ++__i) {
        struct timespec __ts;
        int __log_nsec = __i + 6;

        if (!__test_and_set((unsigned long *)__lock, 1)) {
            return;
        }
        if (__log_nsec > 27) __log_nsec = 27;
        /* Max sleep is 2**27 nsec ~ 60 msec */
        __ts.tv_sec = 0;
        __ts.tv_nsec = 1 << __log_nsec;
        nanosleep(&__ts, 0);
    }
}

template <bool __threads, int __inst>
inline void
__default_alloc_template<__threads, __inst>::_S_unlock(
  volatile unsigned long* __lock)
{
#   if defined(__GNUC__) && __mips >= 3
        asm("sync");
        *__lock = 0;
#   elif __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
        __lock_release(__lock);
#   else
        *__lock = 0;
        // This is not sufficient on many multiprocessors, since
        // writes to protected variables and the lock may be reordered.
#   endif
}
#endif

template <bool __threads, int __inst>
char* __default_alloc_template<__threads, __inst>::_S_start_free = 0;

template <bool __threads, int __inst>
char* __default_alloc_template<__threads, __inst>::_S_end_free = 0;

template <bool __threads, int __inst>
size_t __default_alloc_template<__threads, __inst>::_S_heap_size = 0;

template <bool __threads, int __inst>
__default_alloc_template<__threads, __inst>::_Obj* __VOLATILE
__default_alloc_template<__threads, __inst>::_S_free_list[
    _NFREELISTS
] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy.  Otherwise it appears to allocate too little
// space for the array.

# ifdef __STL_WIN32THREADS
  // Create one to get critical section initialized.
  // We do this once per file, but only the first constructor
  // does anything.
  static alloc __node_allocator_dummy_instance;
# endif

#endif /* ! __USE_MALLOC */

// This implements allocators as specified in the C++ standard.
//
// Note that standard-conforming allocators use many language features
// that are not yet widely implemented.  In particular, they rely on
// member templates, partial specialization, partial ordering of function
// templates, the typename keyword, and the use of the template keyword
// to refer to a template member of a dependent type.

#ifdef __STL_USE_STD_ALLOCATORS

template <class _Tp>
class allocator {
  typedef alloc _Alloc;          // The underlying allocator.
public:
  typedef size_t     size_type;
  typedef ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template <class _Tp1> struct rebind {
    typedef allocator<_Tp1> other;
  };

  allocator() __STL_NOTHROW {}
  allocator(const allocator&) __STL_NOTHROW {}
  template <class _Tp1> allocator(const allocator<_Tp1>&) __STL_NOTHROW {}
  ~allocator() __STL_NOTHROW {}

  pointer address(reference __x) const { return &__x; }
  const_pointer address(const_reference __x) const { return &__x; }

  // __n is permitted to be 0.  The C++ standard says nothing about what
  // the return value is when __n == 0.
  _Tp* allocate(size_type __n, const void* = 0) {
    return __n != 0 ? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)))
                    : 0;
  }

  // __p is not permitted to be a null pointer.
  void deallocate(pointer __p, size_type __n)
    { _Alloc::deallocate(__p, __n * sizeof(_Tp)); }

  size_type max_size() const __STL_NOTHROW
    { return size_t(-1) / sizeof(_Tp); }

  void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
  void destroy(pointer __p) { __p->~_Tp(); }
};

template<>
class allocator<void> {
  typedef size_t      size_type;
  typedef ptrdiff_t   difference_type;
  typedef void*       pointer;
  typedef const void* const_pointer;
  typedef void        value_type;

  template <class _Tp1> struct rebind {
    typedef allocator<_Tp1> other;
  };
};


template <class _T1, class _T2>
inline bool operator==(const allocator<_T1>&, const allocator<_T2>&)
{
  return true;
}

template <class _T1, class _T2>
inline bool operator!=(const allocator<_T1>&, const allocator<_T2>&)
{
  return false;
}
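// A minimal usage sketch of the standard-conforming interface (the variable
// names are illustrative only):
//
//   allocator<double> __a;
//   double* __p = __a.allocate(3);    // raw space for 3 doubles, via alloc
//   __a.construct(__p, 3.14);         // placement-new into the first slot
//   __a.destroy(__p);
//   __a.deallocate(__p, 3);           // the count must match allocate()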

// Allocator adaptor to turn an SGI-style allocator (e.g. alloc, malloc_alloc)
// into a standard-conforming allocator.   Note that this adaptor does
// *not* assume that all objects of the underlying alloc class are
// identical, nor does it assume that all of the underlying alloc's
// member functions are static member functions.  Note, also, that
// __allocator<_Tp, alloc> is essentially the same thing as allocator<_Tp>.

template <class _Tp, class _Alloc>
struct __allocator {
  _Alloc __underlying_alloc;

  typedef size_t     size_type;
  typedef ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template <class _Tp1> struct rebind {
    typedef __allocator<_Tp1, _Alloc> other;
  };

  __allocator() __STL_NOTHROW {}
  __allocator(const __allocator& __a) __STL_NOTHROW
    : __underlying_alloc(__a.__underlying_alloc) {}
  template <class _Tp1>
  __allocator(const __allocator<_Tp1, _Alloc>& __a) __STL_NOTHROW
    : __underlying_alloc(__a.__underlying_alloc) {}
  ~__allocator() __STL_NOTHROW {}

  pointer address(reference __x) const { return &__x; }
  const_pointer address(const_reference __x) const { return &__x; }

  // __n is permitted to be 0.
  _Tp* allocate(size_type __n, const void* = 0) {
    return __n != 0
        ? static_cast<_Tp*>(__underlying_alloc.allocate(__n * sizeof(_Tp)))
        : 0;
  }

  // __p is not permitted to be a null pointer.
  void deallocate(pointer __p, size_type __n)
    { __underlying_alloc.deallocate(__p, __n * sizeof(_Tp)); }

  size_type max_size() const __STL_NOTHROW
    { return size_t(-1) / sizeof(_Tp); }

  void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
  void destroy(pointer __p) { __p->~_Tp(); }
};

template <class _Alloc>
class __allocator<void, _Alloc> {
  typedef size_t      size_type;
  typedef ptrdiff_t   difference_type;
  typedef void*       pointer;
  typedef const void* const_pointer;
  typedef void        value_type;

  template <class _Tp1> struct rebind {
    typedef __allocator<_Tp1, _Alloc> other;
  };
};

template <class _Tp, class _Alloc>
inline bool operator==(const __allocator<_Tp, _Alloc>& __a1,
                       const __allocator<_Tp, _Alloc>& __a2)
{
  return __a1.__underlying_alloc == __a2.__underlying_alloc;
}

#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
template <class _Tp, class _Alloc>
inline bool operator!=(const __allocator<_Tp, _Alloc>& __a1,
                       const __allocator<_Tp, _Alloc>& __a2)
{
  return __a1.__underlying_alloc != __a2.__underlying_alloc;
}
#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
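// A minimal sketch of the adaptor in use: wrapping an SGI-style allocator so
// it can be passed where a standard-conforming allocator is expected.
// __allocator<int, alloc> behaves like allocator<int>; wrapping malloc_alloc
// instead bypasses the free lists entirely.
//
//   __allocator<int, malloc_alloc> __ma;
//   int* __p = __ma.allocate(8);      // 8 * sizeof(int) bytes from malloc
//   __ma.deallocate(__p, 8);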

// Comparison operators for all of the predefined SGI-style allocators.
// This ensures that __allocator<malloc_alloc> (for example) will
// work correctly.

template <int inst>
inline bool operator==(const __malloc_alloc_template<inst>&,
                       const __malloc_alloc_template<inst>&)
{
  return true;
}

#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
template <int __inst>
inline bool operator!=(const __malloc_alloc_template<__inst>&,
                       const __malloc_alloc_template<__inst>&)
{
  return false;
}
#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */

#ifndef __USE_MALLOC
template <bool __threads, int __inst>
inline bool operator==(const __default_alloc_template<__threads, __inst>&,
                       const __default_alloc_template<__threads, __inst>&)
{
  return true;
}

# ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
template <bool __threads, int __inst>
inline bool operator!=(const __default_alloc_template<__threads, __inst>&,
                       const __default_alloc_template<__threads, __inst>&)
{
  return false;
}
# endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
#endif

template <class _Alloc>
inline bool operator==(const debug_alloc<_Alloc>&,
                       const debug_alloc<_Alloc>&) {
  return true;
}

#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
template <class _Alloc>
inline bool operator!=(const debug_alloc<_Alloc>&,
                       const debug_alloc<_Alloc>&) {
  return false;
}
#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */

// Another allocator adaptor: _Alloc_traits.  This serves two
// purposes.  First, make it possible to write containers that can use
// either SGI-style allocators or standard-conforming allocators.
// Second, provide a mechanism so that containers can query whether or
// not the allocator has distinct instances.  If not, the container
// can avoid wasting a word of memory to store an empty object.

// This adaptor uses partial specialization.  The general case of
// _Alloc_traits<_Tp, _Alloc> assumes that _Alloc is a
// standard-conforming allocator, possibly with non-equal instances
// and non-static members.  (It still behaves correctly even if _Alloc
// has static members and all instances are equal.  Refinements
// affect performance, not correctness.)

// There are always two members: allocator_type, which is a standard-
// conforming allocator type for allocating objects of type _Tp, and
// _S_instanceless, a static const member of type bool.  If
// _S_instanceless is true, this means that there is no difference
// between any two instances of type allocator_type.  Furthermore, if
// _S_instanceless is true, then _Alloc_traits has one additional
// member: _Alloc_type.  This type encapsulates allocation and
// deallocation of objects of type _Tp through a static interface; it
// has two member functions, whose signatures are
//    static _Tp* allocate(size_t)
//    static void deallocate(_Tp*, size_t)

// The fully general version.

template <class _Tp, class _Allocator>
struct _Alloc_traits
{
  static const bool _S_instanceless = false;
  typedef typename _Allocator::__STL_TEMPLATE rebind<_Tp>::other
          allocator_type;
};

template <class _Tp, class _Allocator>
const bool _Alloc_traits<_Tp, _Allocator>::_S_instanceless;
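// A minimal sketch of how a container is expected to consume these traits
// (the container name is purely illustrative): when _S_instanceless is true
// the container can allocate through the static _Alloc_type interface and
// avoid storing an allocator object at all.
//
//   template <class _Tp, class _Alloc>
//   class _Some_container {
//     typedef _Alloc_traits<_Tp, _Alloc> _Traits;
//     typedef typename _Traits::allocator_type allocator_type;
//     // When _Traits::_S_instanceless is true:
//     //   _Tp* __p = _Traits::_Alloc_type::allocate(__n);
//     //   _Traits::_Alloc_type::deallocate(__p, __n);
//   };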

// The version for the default allocator.

template <class _Tp, class _Tp1>
struct _Alloc_traits<_Tp, allocator<_Tp1> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, alloc> _Alloc_type;
  typedef allocator<_Tp> allocator_type;
};

// Versions for the predefined SGI-style allocators.

template <class _Tp, int __inst>
struct _Alloc_traits<_Tp, __malloc_alloc_template<__inst> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
  typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
};

#ifndef __USE_MALLOC
template <class _Tp, bool __threads, int __inst>
struct _Alloc_traits<_Tp, __default_alloc_template<__threads, __inst> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, __default_alloc_template<__threads, __inst> >
          _Alloc_type;
  typedef __allocator<_Tp, __default_alloc_template<__threads, __inst> >
          allocator_type;
};
#endif

template <class _Tp, class _Alloc>
struct _Alloc_traits<_Tp, debug_alloc<_Alloc> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, debug_alloc<_Alloc> > _Alloc_type;
  typedef __allocator<_Tp, debug_alloc<_Alloc> > allocator_type;
};

// Versions for the __allocator adaptor used with the predefined
// SGI-style allocators.

template <class _Tp, class _Tp1, int __inst>
struct _Alloc_traits<_Tp,
                     __allocator<_Tp1, __malloc_alloc_template<__inst> > >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
  typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
};

#ifndef __USE_MALLOC
template <class _Tp, class _Tp1, bool __thr, int __inst>
struct _Alloc_traits<_Tp,
                     __allocator<_Tp1,
                                 __default_alloc_template<__thr, __inst> > >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, __default_alloc_template<__thr,__inst> >
          _Alloc_type;
  typedef __allocator<_Tp, __default_alloc_template<__thr,__inst> >
          allocator_type;
};
#endif

template <class _Tp, class _Tp1, class _Alloc>
struct _Alloc_traits<_Tp, __allocator<_Tp1, debug_alloc<_Alloc> > >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, debug_alloc<_Alloc> > _Alloc_type;
  typedef __allocator<_Tp, debug_alloc<_Alloc> > allocator_type;
};


#endif /* __STL_USE_STD_ALLOCATORS */

#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
#pragma reset woff 1174
#endif

__STL_END_NAMESPACE

#undef __PRIVATE

#endif /* __SGI_STL_INTERNAL_ALLOC_H */

// Local Variables:
// mode:C++
// End: