/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/* NOTE: This is an internal header file, included by other STL headers.
 *   You should not attempt to use it directly.
 */

#ifndef __SGI_STL_INTERNAL_ALLOC_H
#define __SGI_STL_INTERNAL_ALLOC_H

#ifdef __SUNPRO_CC
#  define __PRIVATE public
   // Extra access restrictions prevent us from really making some things
   // private.
#else
#  define __PRIVATE private
#endif

#ifdef __STL_STATIC_TEMPLATE_MEMBER_BUG
#  define __USE_MALLOC
#endif


// This implements some standard node allocators.  These are
// NOT the same as the allocators in the C++ draft standard or in
// the original STL.  They do not encapsulate different pointer
// types; indeed we assume that there is only one pointer type.
// The allocation primitives are intended to allocate individual objects,
// not larger arenas as with the original STL allocators.

#if 0
#   include <new>
#   define __THROW_BAD_ALLOC throw bad_alloc()
#elif !defined(__THROW_BAD_ALLOC)
#   if __BEOS__
#     include <stdio.h>
#     define __THROW_BAD_ALLOC fprintf(stderr, "out of memory\n"); exit(1)
#   else
#     include <iostream.h>
#     define __THROW_BAD_ALLOC cerr << "out of memory" << endl; exit(1)
#   endif
#endif

#ifdef __STL_WIN32THREADS
#   include <windows.h>
#endif

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#ifndef __RESTRICT
#  define __RESTRICT
#endif

#if !defined(__STL_PTHREADS) && !defined(__STL_SOLTHREADS) \
 && !defined(_NOTHREADS) \
 && !defined(__STL_SGI_THREADS) && !defined(__STL_WIN32THREADS)
#   define _NOTHREADS
#endif

# ifdef __STL_PTHREADS
    // POSIX Threads
    // This is dubious, since this is likely to be a high contention
    // lock.  Performance may not be adequate.
#   include <pthread.h>
#   define __NODE_ALLOCATOR_LOCK \
        if (threads) pthread_mutex_lock(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_UNLOCK \
        if (threads) pthread_mutex_unlock(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE volatile  // Needed at -O3 on SGI
# endif
# ifdef __STL_SOLTHREADS
#   include <thread.h>
#   define __NODE_ALLOCATOR_LOCK \
        if (threads) mutex_lock(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_UNLOCK \
        if (threads) mutex_unlock(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE
# endif
# ifdef __STL_WIN32THREADS
    // The lock needs to be initialized by constructing an allocator
    // object of the right type.  We do that here explicitly for alloc.
#   define __NODE_ALLOCATOR_LOCK \
        EnterCriticalSection(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_UNLOCK \
        LeaveCriticalSection(&_S_node_allocator_lock)
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE volatile  // may not be needed
# endif /* WIN32THREADS */
# ifdef __STL_SGI_THREADS
    // This should work without threads, with sproc threads, or with
    // pthreads.  It is suboptimal in all cases.
    // It is unlikely to even compile on nonSGI machines.

    extern "C" {
      extern int __us_rsthread_malloc;
    }
    // The above is copied from malloc.h.  Including <malloc.h>
    // would be cleaner but fails with certain levels of standard
    // conformance.
#   define __NODE_ALLOCATOR_LOCK if (threads && __us_rsthread_malloc) \
                { _S_lock(&_S_node_allocator_lock); }
#   define __NODE_ALLOCATOR_UNLOCK if (threads && __us_rsthread_malloc) \
                { _S_unlock(&_S_node_allocator_lock); }
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE volatile  // Needed at -O3 on SGI
# endif
# ifdef _NOTHREADS
    // Thread-unsafe
#   define __NODE_ALLOCATOR_LOCK
#   define __NODE_ALLOCATOR_UNLOCK
#   define __NODE_ALLOCATOR_THREADS false
#   define __VOLATILE
# endif

__STL_BEGIN_NAMESPACE

#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
#pragma set woff 1174
#endif

// Malloc-based allocator.  Typically slower than default alloc below.
// Typically thread-safe and more storage efficient.
#ifdef __STL_STATIC_TEMPLATE_MEMBER_BUG
# ifdef __DECLARE_GLOBALS_HERE
    void (* __malloc_alloc_oom_handler)() = 0;
    // g++ 2.7.2 does not handle static template data members.
# else
    extern void (* __malloc_alloc_oom_handler)();
# endif
#endif

template <int __inst>
class __malloc_alloc_template {

private:

  static void* _S_oom_malloc(size_t);
  static void* _S_oom_realloc(void*, size_t);

#ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
  static void (* __malloc_alloc_oom_handler)();
#endif

public:

  static void* allocate(size_t __n)
  {
    void* __result = malloc(__n);
    if (0 == __result) __result = _S_oom_malloc(__n);
    return __result;
  }

  static void deallocate(void* __p, size_t /* __n */)
  {
    free(__p);
  }

  static void* reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
  {
    void* __result = realloc(__p, __new_sz);
    if (0 == __result) __result = _S_oom_realloc(__p, __new_sz);
    return __result;
  }

  static void (* __set_malloc_handler(void (*__f)()))()
  {
    void (* __old)() = __malloc_alloc_oom_handler;
    __malloc_alloc_oom_handler = __f;
    return(__old);
  }

};

// malloc_alloc out-of-memory handling

#ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
template <int __inst>
void (* __malloc_alloc_template<__inst>::__malloc_alloc_oom_handler)() = 0;
#endif

template <int __inst>
void*
__malloc_alloc_template<__inst>::_S_oom_malloc(size_t __n)
{
    void (* __my_malloc_handler)();
    void* __result;

    for (;;) {
        __my_malloc_handler = __malloc_alloc_oom_handler;
        if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
        (*__my_malloc_handler)();
        __result = malloc(__n);
        if (__result) return(__result);
    }
}

template <int __inst>
void* __malloc_alloc_template<__inst>::_S_oom_realloc(void* __p, size_t __n)
{
    void (* __my_malloc_handler)();
    void* __result;

    for (;;) {
        __my_malloc_handler = __malloc_alloc_oom_handler;
        if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
        (*__my_malloc_handler)();
        __result = realloc(__p, __n);
        if (__result) return(__result);
    }
}

typedef __malloc_alloc_template<0> malloc_alloc;

template<class _Tp, class _Alloc>
class simple_alloc {

public:
    static _Tp* allocate(size_t __n)
      { return 0 == __n ? 0 : (_Tp*) _Alloc::allocate(__n * sizeof (_Tp)); }
    static _Tp* allocate(void)
      { return (_Tp*) _Alloc::allocate(sizeof (_Tp)); }
    static void deallocate(_Tp* __p, size_t __n)
      { if (0 != __n) _Alloc::deallocate(__p, __n * sizeof (_Tp)); }
    static void deallocate(_Tp* __p)
      { _Alloc::deallocate(__p, sizeof (_Tp)); }
};

// Allocator adaptor to check size arguments for debugging.
// Reports errors using assert.  Checking can be disabled with
// NDEBUG, but it's far better to just use the underlying allocator
// instead when no checking is desired.
// There is some evidence that this can confuse Purify.
template <class _Alloc>
class debug_alloc {

private:

  enum {_S_extra = 8};  // Size of space used to store size.  Note
                        // that this must be large enough to preserve
                        // alignment.

public:

  static void* allocate(size_t __n)
  {
    char* __result = (char*)_Alloc::allocate(__n + _S_extra);
    *(size_t*)__result = __n;
    return __result + _S_extra;
  }

  static void deallocate(void* __p, size_t __n)
  {
    char* __real_p = (char*)__p - _S_extra;
    assert(*(size_t*)__real_p == __n);
    _Alloc::deallocate(__real_p, __n + _S_extra);
  }

  static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz)
  {
    char* __real_p = (char*)__p - _S_extra;
    assert(*(size_t*)__real_p == __old_sz);
    char* __result = (char*)
      _Alloc::reallocate(__real_p, __old_sz + _S_extra, __new_sz + _S_extra);
    *(size_t*)__result = __new_sz;
    return __result + _S_extra;
  }

};


# ifdef __USE_MALLOC

typedef malloc_alloc alloc;
typedef malloc_alloc single_client_alloc;

# else


// Default node allocator.
// With a reasonable compiler, this should be roughly as fast as the
// original STL class-specific allocators, but with less fragmentation.
// Default_alloc_template parameters are experimental and MAY
// DISAPPEAR in the future.  Clients should just use alloc for now.
//
// Important implementation properties:
// 1. If the client requests an object of size > _MAX_BYTES, the resulting
//    object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
//    _S_round_up(requested_size).  Thus the client has enough size
//    information that we can return the object to the proper free list
//    without permanently losing part of the object.
//

// The first template parameter specifies whether more than one thread
// may use this allocator.  It is safe to allocate an object from
// one instance of a default_alloc and deallocate it with another
// one.  This effectively transfers its ownership to the second one.
// This may have undesirable effects on reference locality.
// The second parameter is unreferenced and serves only to allow the
// creation of multiple default_alloc instances.
// Note that containers built on different allocator instances have
// different types, limiting the utility of this approach.
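// A worked example of the size-class arithmetic used below (illustrative
// only), assuming _ALIGN == 8 and _MAX_BYTES == 128:  a request for 50
// bytes is rounded up to _S_round_up(50) == 56 and served from free list
// _S_freelist_index(50) == 6 (list __i holds blocks of (__i + 1) * _ALIGN
// bytes).  If that list is empty, _S_refill(56) asks _S_chunk_alloc(56,
// __nobjs) for up to 20 such blocks, hands the first back to the caller,
// and threads the rest onto the free list.  Requests larger than
// _MAX_BYTES go straight to malloc_alloc.  For instance:
//
//     void* __p = alloc::allocate(50);    // carved from the 56-byte list
//     alloc::deallocate(__p, 50);         // returned to the same list
//     void* __q = alloc::allocate(200);   // > _MAX_BYTES: plain malloc_alloc
//     alloc::deallocate(__q, 200);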
#ifdef __SUNPRO_CC
// breaks if we make these template class members:
  enum {_ALIGN = 8};
  enum {_MAX_BYTES = 128};
  enum {_NFREELISTS = _MAX_BYTES/_ALIGN};
#endif

template <bool threads, int inst>
class __default_alloc_template {

private:
  // Really we should use static const int x = N
  // instead of enum { x = N }, but few compilers accept the former.
# ifndef __SUNPRO_CC
    enum {_ALIGN = 8};
#   if defined __BEOS__
    enum {_MAX_BYTES = 0};
#   else
    enum {_MAX_BYTES = 128};
#   endif
    enum {_NFREELISTS = _MAX_BYTES/_ALIGN};
# endif
  static size_t
  _S_round_up(size_t __bytes)
    { return (((__bytes) + _ALIGN-1) & ~(_ALIGN - 1)); }

__PRIVATE:
  union _Obj {
        union _Obj* _M_free_list_link;
        char _M_client_data[1];    /* The client sees this.        */
  };
private:
# ifdef __SUNPRO_CC
    static _Obj* __VOLATILE _S_free_list[];
        // Specifying a size results in duplicate def for 4.1
# else
    static _Obj* __VOLATILE _S_free_list[_NFREELISTS];
# endif
  static size_t _S_freelist_index(size_t __bytes) {
        return (((__bytes) + _ALIGN-1)/_ALIGN - 1);
  }

  // Returns an object of size __n, and optionally adds to size __n free list.
  static void* _S_refill(size_t __n);
  // Allocates a chunk for nobjs of size "size".  nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char* _S_chunk_alloc(size_t __size, int& __nobjs);

  // Chunk allocation state.
  static char* _S_start_free;
  static char* _S_end_free;
  static size_t _S_heap_size;

# ifdef __STL_SGI_THREADS
    static volatile unsigned long _S_node_allocator_lock;
    static void _S_lock(volatile unsigned long*);
    static inline void _S_unlock(volatile unsigned long*);
# endif

# ifdef __STL_PTHREADS
    static pthread_mutex_t _S_node_allocator_lock;
# endif

# ifdef __STL_SOLTHREADS
    static mutex_t _S_node_allocator_lock;
# endif

# ifdef __STL_WIN32THREADS
    static CRITICAL_SECTION _S_node_allocator_lock;
    static bool _S_node_allocator_lock_initialized;

  public:
    __default_alloc_template() {
        // This assumes the first constructor is called before threads
        // are started.
        if (!_S_node_allocator_lock_initialized) {
            InitializeCriticalSection(&_S_node_allocator_lock);
            _S_node_allocator_lock_initialized = true;
        }
    }
  private:
# endif

    class _Lock {
        public:
            _Lock() { __NODE_ALLOCATOR_LOCK; }
            ~_Lock() { __NODE_ALLOCATOR_UNLOCK; }
    };
    friend class _Lock;

public:

  /* __n must be > 0      */
  static void* allocate(size_t __n)
  {
    _Obj* __VOLATILE* __my_free_list;
    _Obj* __RESTRICT __result;

    if (__n > (size_t) _MAX_BYTES) {
        return(malloc_alloc::allocate(__n));
    }
    __my_free_list = _S_free_list + _S_freelist_index(__n);
    // Acquire the lock here with a constructor call.
    // This ensures that it is released in exit or during stack
    // unwinding.
#   ifndef _NOTHREADS
        /*REFERENCED*/
        _Lock __lock_instance;
#   endif
    __result = *__my_free_list;
    if (__result == 0) {
        void* __r = _S_refill(_S_round_up(__n));
        return __r;
    }
    *__my_free_list = __result -> _M_free_list_link;
    return (__result);
  };

  /* __p may not be 0 */
  static void deallocate(void* __p, size_t __n)
  {
    _Obj* __q = (_Obj*)__p;
    _Obj* __VOLATILE* __my_free_list;

    if (__n > (size_t) _MAX_BYTES) {
        malloc_alloc::deallocate(__p, __n);
        return;
    }
    __my_free_list = _S_free_list + _S_freelist_index(__n);
    // acquire lock
#   ifndef _NOTHREADS
        /*REFERENCED*/
        _Lock __lock_instance;
#   endif /* _NOTHREADS */
    __q -> _M_free_list_link = *__my_free_list;
    *__my_free_list = __q;
    // lock is released here
  }

  static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz);

};

typedef __default_alloc_template<__NODE_ALLOCATOR_THREADS, 0> alloc;
typedef __default_alloc_template<false, 0> single_client_alloc;



/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
/* We hold the allocation lock.                                         */
template <bool __threads, int __inst>
char*
__default_alloc_template<__threads, __inst>::_S_chunk_alloc(size_t __size,
                                                            int& __nobjs)
{
    char* __result;
    size_t __total_bytes = __size * __nobjs;
    size_t __bytes_left = _S_end_free - _S_start_free;

    if (__bytes_left >= __total_bytes) {
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return(__result);
    } else if (__bytes_left >= __size) {
        __nobjs = (int)(__bytes_left/__size);
        __total_bytes = __size * __nobjs;
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return(__result);
    } else {
        size_t __bytes_to_get =
          2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
        // Try to make use of the left-over piece.
        if (__bytes_left > 0) {
            _Obj* __VOLATILE* __my_free_list =
                        _S_free_list + _S_freelist_index(__bytes_left);

            ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
            *__my_free_list = (_Obj*)_S_start_free;
        }
        _S_start_free = (char*)malloc(__bytes_to_get);
        if (0 == _S_start_free) {
            size_t __i;
            _Obj* __VOLATILE* __my_free_list;
            _Obj* __p;
            // Try to make do with what we have.  That can't
            // hurt.  We do not try smaller requests, since that tends
            // to result in disaster on multi-process machines.
            for (__i = __size; __i <= _MAX_BYTES; __i += _ALIGN) {
                __my_free_list = _S_free_list + _S_freelist_index(__i);
                __p = *__my_free_list;
                if (0 != __p) {
                    *__my_free_list = __p -> _M_free_list_link;
                    _S_start_free = (char*)__p;
                    _S_end_free = _S_start_free + __i;
                    return(_S_chunk_alloc(__size, __nobjs));
                    // Any leftover piece will eventually make it to the
                    // right free list.
                }
            }
            _S_end_free = 0;    // In case of exception.
            _S_start_free = (char*)malloc_alloc::allocate(__bytes_to_get);
            // This should either throw an
            // exception or remedy the situation.  Thus we assume it
            // succeeded.
        }
        _S_heap_size += __bytes_to_get;
        _S_end_free = _S_start_free + __bytes_to_get;
        return(_S_chunk_alloc(__size, __nobjs));
    }
}


/* Returns an object of size __n, and optionally adds to size __n free list.*/
/* We assume that __n is properly aligned.                               */
/* We hold the allocation lock.                                          */
template <bool __threads, int __inst>
void*
__default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
{
    int __nobjs = 20;
    char* __chunk = _S_chunk_alloc(__n, __nobjs);
    _Obj* __VOLATILE* __my_free_list;
    _Obj* __result;
    _Obj* __current_obj;
    _Obj* __next_obj;
    int __i;

    if (1 == __nobjs) return(__chunk);
    __my_free_list = _S_free_list + _S_freelist_index(__n);

    /* Build free list in chunk */
    __result = (_Obj*)__chunk;
    *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
    for (__i = 1; ; __i++) {
        __current_obj = __next_obj;
        __next_obj = (_Obj*)((char*)__next_obj + __n);
        if (__nobjs - 1 == __i) {
            __current_obj -> _M_free_list_link = 0;
            break;
        } else {
            __current_obj -> _M_free_list_link = __next_obj;
        }
    }
    return(__result);
}

template <bool threads, int inst>
void*
__default_alloc_template<threads, inst>::reallocate(void* __p,
                                                    size_t __old_sz,
                                                    size_t __new_sz)
{
    void* __result;
    size_t __copy_sz;

    if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES) {
        return(realloc(__p, __new_sz));
    }
    if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
    __result = allocate(__new_sz);
    __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
    memcpy(__result, __p, __copy_sz);
    deallocate(__p, __old_sz);
    return(__result);
}

#ifdef __STL_PTHREADS
template <bool __threads, int __inst>
pthread_mutex_t
__default_alloc_template<__threads, __inst>::_S_node_allocator_lock
    = PTHREAD_MUTEX_INITIALIZER;
#endif

#ifdef __STL_SOLTHREADS
template <bool __threads, int __inst>
mutex_t
__default_alloc_template<__threads, __inst>::_S_node_allocator_lock
    = DEFAULTMUTEX;
#endif

#ifdef __STL_WIN32THREADS
template <bool __threads, int __inst>
CRITICAL_SECTION
__default_alloc_template<__threads, __inst>::
    _S_node_allocator_lock;

template <bool __threads, int __inst>
bool
__default_alloc_template<__threads, __inst>::
    _S_node_allocator_lock_initialized
    = false;
#endif

#ifdef __STL_SGI_THREADS
__STL_END_NAMESPACE
#include <mutex.h>
#include <time.h>  /* XXX should use <ctime> */
__STL_BEGIN_NAMESPACE
// Somewhat generic lock implementations.  We need only test-and-set
// and some way to sleep.  These should work with both SGI pthreads
// and sproc threads.  They may be useful on other systems.
template <bool __threads, int __inst>
volatile unsigned long
__default_alloc_template<__threads, __inst>::_S_node_allocator_lock = 0;

#if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) || defined(__GNUC__)
#   define __test_and_set(l,v) test_and_set(l,v)
#endif

template <bool __threads, int __inst>
void
__default_alloc_template<__threads, __inst>::
  _S_lock(volatile unsigned long* __lock)
{
    const unsigned __low_spin_max = 30;     // spins if we suspect uniprocessor
    const unsigned __high_spin_max = 1000;  // spins for multiprocessor
    static unsigned __spin_max = __low_spin_max;
    unsigned __my_spin_max;
    static unsigned __last_spins = 0;
    unsigned __my_last_spins;
    unsigned __junk;
#   define __ALLOC_PAUSE \
      __junk *= __junk; __junk *= __junk; __junk *= __junk; __junk *= __junk
    int __i;

    if (!__test_and_set((unsigned long*)__lock, 1)) {
        return;
    }
    __my_spin_max = __spin_max;
    __my_last_spins = __last_spins;
    for (__i = 0; __i < __my_spin_max; __i++) {
        if (__i < __my_last_spins/2 || *__lock) {
            __ALLOC_PAUSE;
            continue;
        }
        if (!__test_and_set((unsigned long*)__lock, 1)) {
            // got it!
            // Spinning worked.  Thus we're probably not being scheduled
            // against the other process with which we were contending.
            // Thus it makes sense to spin longer the next time.
            __last_spins = __i;
            __spin_max = __high_spin_max;
            return;
        }
    }
    // We are probably being scheduled against the other process.  Sleep.
    __spin_max = __low_spin_max;
    for (__i = 0 ;; ++__i) {
        struct timespec __ts;
        int __log_nsec = __i + 6;

        if (!__test_and_set((unsigned long *)__lock, 1)) {
            return;
        }
        if (__log_nsec > 27) __log_nsec = 27;
        /* Max sleep is 2**27nsec ~ 60msec      */
        __ts.tv_sec = 0;
        __ts.tv_nsec = 1 << __log_nsec;
        nanosleep(&__ts, 0);
    }
}

template <bool __threads, int __inst>
inline void
__default_alloc_template<__threads, __inst>::_S_unlock(
  volatile unsigned long* __lock)
{
#   if defined(__GNUC__) && __mips >= 3
        asm("sync");
        *__lock = 0;
#   elif __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
        __lock_release(__lock);
#   else
        *__lock = 0;
        // This is not sufficient on many multiprocessors, since
        // writes to protected variables and the lock may be reordered.
#   endif
}
#endif

template <bool __threads, int __inst>
char* __default_alloc_template<__threads, __inst>::_S_start_free = 0;

template <bool __threads, int __inst>
char* __default_alloc_template<__threads, __inst>::_S_end_free = 0;

template <bool __threads, int __inst>
size_t __default_alloc_template<__threads, __inst>::_S_heap_size = 0;

template <bool __threads, int __inst>
__default_alloc_template<__threads, __inst>::_Obj* __VOLATILE
__default_alloc_template<__threads, __inst> ::_S_free_list[
    _NFREELISTS
#if defined __BEOS__
] = { };
#else
] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
#endif
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy.  Otherwise it appears to allocate too little
// space for the array.

# ifdef __STL_WIN32THREADS
  // Create one to get critical section initialized.
  // We do this once per file, but only the first constructor
  // does anything.
  static alloc __node_allocator_dummy_instance;
# endif

#endif /* ! __USE_MALLOC */
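// Usage sketch (illustrative only; the typedef and variable names here are
// not part of this header): containers typically wrap one of the allocators
// above in simple_alloc, which scales byte counts by the element size:
//
//     typedef simple_alloc<int, alloc> _Int_alloc;
//     int* __buf = _Int_alloc::allocate(16);   // space for 16 ints
//     _Int_alloc::deallocate(__buf, 16);
//
// debug_alloc can be layered underneath in the same way, e.g.
// simple_alloc<int, debug_alloc<alloc> >, to assert that deallocation sizes
// match allocation sizes.  An out-of-memory handler for the malloc-based
// allocator can be installed with malloc_alloc::__set_malloc_handler(__f),
// where __f has type void (*)() and is called before each retry; the
// previous handler is returned.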
// This implements allocators as specified in the C++ standard.
//
// Note that standard-conforming allocators use many language features
// that are not yet widely implemented.  In particular, they rely on
// member templates, partial specialization, partial ordering of function
// templates, the typename keyword, and the use of the template keyword
// to refer to a template member of a dependent type.

#ifdef __STL_USE_STD_ALLOCATORS

template <class _Tp>
class allocator {
  typedef alloc _Alloc;          // The underlying allocator.
public:
  typedef size_t     size_type;
  typedef ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template <class _Tp1> struct rebind {
    typedef allocator<_Tp1> other;
  };

  allocator() __STL_NOTHROW {}
  allocator(const allocator&) __STL_NOTHROW {}
  template <class _Tp1> allocator(const allocator<_Tp1>&) __STL_NOTHROW {}
  ~allocator() __STL_NOTHROW {}

  pointer address(reference __x) const { return &__x; }
  const_pointer address(const_reference __x) const { return &__x; }

  // __n is permitted to be 0.  The C++ standard says nothing about what
  // the return value is when __n == 0.
  _Tp* allocate(size_type __n, const void* = 0) {
    return __n != 0 ? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)))
                    : 0;
  }

  // __p is not permitted to be a null pointer.
  void deallocate(pointer __p, size_type __n)
    { _Alloc::deallocate(__p, __n * sizeof(_Tp)); }

  size_type max_size() const __STL_NOTHROW
    { return size_t(-1) / sizeof(_Tp); }

  void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
  void destroy(pointer __p) { __p->~_Tp(); }
};

template<>
class allocator<void> {
public:
  typedef size_t      size_type;
  typedef ptrdiff_t   difference_type;
  typedef void*       pointer;
  typedef const void* const_pointer;
  typedef void        value_type;

  template <class _Tp1> struct rebind {
    typedef allocator<_Tp1> other;
  };
};


template <class _T1, class _T2>
inline bool operator==(const allocator<_T1>&, const allocator<_T2>&)
{
  return true;
}

template <class _T1, class _T2>
inline bool operator!=(const allocator<_T1>&, const allocator<_T2>&)
{
  return false;
}
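// Usage sketch (illustrative only; names below are hypothetical): unlike the
// SGI-style allocators above, allocator<_Tp> works in element counts rather
// than bytes and may be used directly:
//
//     allocator<double> __a;
//     double* __p = __a.allocate(3);        // room for 3 doubles
//     __a.construct(__p, 3.14);             // placement-new a value
//     __a.destroy(__p);
//     __a.deallocate(__p, 3);
//
// allocator<_Tp>::rebind<_Tp1>::other names the corresponding allocator for
// a different element type, which is how containers obtain allocators for
// their internal node types.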
// Allocator adaptor to turn an SGI-style allocator (e.g. alloc, malloc_alloc)
// into a standard-conforming allocator.  Note that this adaptor does
// *not* assume that all objects of the underlying alloc class are
// identical, nor does it assume that all of the underlying alloc's
// member functions are static member functions.  Note, also, that
// __allocator<_Tp, alloc> is essentially the same thing as allocator<_Tp>.

template <class _Tp, class _Alloc>
struct __allocator {
  _Alloc __underlying_alloc;

  typedef size_t     size_type;
  typedef ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template <class _Tp1> struct rebind {
    typedef __allocator<_Tp1, _Alloc> other;
  };

  __allocator() __STL_NOTHROW {}
  __allocator(const __allocator& __a) __STL_NOTHROW
    : __underlying_alloc(__a.__underlying_alloc) {}
  template <class _Tp1>
  __allocator(const __allocator<_Tp1, _Alloc>& __a) __STL_NOTHROW
    : __underlying_alloc(__a.__underlying_alloc) {}
  ~__allocator() __STL_NOTHROW {}

  pointer address(reference __x) const { return &__x; }
  const_pointer address(const_reference __x) const { return &__x; }

  // __n is permitted to be 0.
  _Tp* allocate(size_type __n, const void* = 0) {
    return __n != 0
        ? static_cast<_Tp*>(__underlying_alloc.allocate(__n * sizeof(_Tp)))
        : 0;
  }

  // __p is not permitted to be a null pointer.
  void deallocate(pointer __p, size_type __n)
    { __underlying_alloc.deallocate(__p, __n * sizeof(_Tp)); }

  size_type max_size() const __STL_NOTHROW
    { return size_t(-1) / sizeof(_Tp); }

  void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
  void destroy(pointer __p) { __p->~_Tp(); }
};

template <class _Alloc>
class __allocator<void, _Alloc> {
public:
  typedef size_t      size_type;
  typedef ptrdiff_t   difference_type;
  typedef void*       pointer;
  typedef const void* const_pointer;
  typedef void        value_type;

  template <class _Tp1> struct rebind {
    typedef __allocator<_Tp1, _Alloc> other;
  };
};

template <class _Tp, class _Alloc>
inline bool operator==(const __allocator<_Tp, _Alloc>& __a1,
                       const __allocator<_Tp, _Alloc>& __a2)
{
  return __a1.__underlying_alloc == __a2.__underlying_alloc;
}

#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
template <class _Tp, class _Alloc>
inline bool operator!=(const __allocator<_Tp, _Alloc>& __a1,
                       const __allocator<_Tp, _Alloc>& __a2)
{
  return __a1.__underlying_alloc != __a2.__underlying_alloc;
}
#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
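// Usage sketch (illustrative only; the typedef name is hypothetical):
// __allocator gives any of the SGI-style allocators above a
// standard-conforming interface, e.g.
//
//     typedef __allocator<char, malloc_alloc> _Char_alloc;
//     _Char_alloc __a;
//     char* __s = __a.allocate(64);
//     __a.deallocate(__s, 64);
//
// Because the underlying allocator is held as a data member, underlying
// allocators with non-static members and non-equal instances are
// supported as well.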
// Comparison operators for all of the predefined SGI-style allocators.
// This ensures that __allocator<malloc_alloc> (for example) will
// work correctly.

template <int inst>
inline bool operator==(const __malloc_alloc_template<inst>&,
                       const __malloc_alloc_template<inst>&)
{
  return true;
}

#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
template <int __inst>
inline bool operator!=(const __malloc_alloc_template<__inst>&,
                       const __malloc_alloc_template<__inst>&)
{
  return false;
}
#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */

#ifndef __USE_MALLOC
template <bool __threads, int __inst>
inline bool operator==(const __default_alloc_template<__threads, __inst>&,
                       const __default_alloc_template<__threads, __inst>&)
{
  return true;
}

# ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
template <bool __threads, int __inst>
inline bool operator!=(const __default_alloc_template<__threads, __inst>&,
                       const __default_alloc_template<__threads, __inst>&)
{
  return false;
}
# endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
#endif

template <class _Alloc>
inline bool operator==(const debug_alloc<_Alloc>&,
                       const debug_alloc<_Alloc>&) {
  return true;
}

#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
template <class _Alloc>
inline bool operator!=(const debug_alloc<_Alloc>&,
                       const debug_alloc<_Alloc>&) {
  return false;
}
#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */

// Another allocator adaptor: _Alloc_traits.  This serves two
// purposes.  First, make it possible to write containers that can use
// either SGI-style allocators or standard-conforming allocators.
// Second, provide a mechanism so that containers can query whether or
// not the allocator has distinct instances.  If not, the container
// can avoid wasting a word of memory to store an empty object.

// This adaptor uses partial specialization.  The general case of
// _Alloc_traits<_Tp, _Alloc> assumes that _Alloc is a
// standard-conforming allocator, possibly with non-equal instances
// and non-static members.  (It still behaves correctly even if _Alloc
// has static members and if all instances are equal.  Refinements
// affect performance, not correctness.)

// There are always two members: allocator_type, which is a standard-
// conforming allocator type for allocating objects of type _Tp, and
// _S_instanceless, a static const member of type bool.  If
// _S_instanceless is true, this means that there is no difference
// between any two instances of type allocator_type.  Furthermore, if
// _S_instanceless is true, then _Alloc_traits has one additional
// member: _Alloc_type.  This type encapsulates allocation and
// deallocation of objects of type _Tp through a static interface; it
// has two member functions, whose signatures are
//    static _Tp* allocate(size_t)
//    static void deallocate(_Tp*, size_t)

// The fully general version.

template <class _Tp, class _Allocator>
struct _Alloc_traits
{
  static const bool _S_instanceless = false;
  typedef typename _Allocator::__STL_TEMPLATE rebind<_Tp>::other
          allocator_type;
};

template <class _Tp, class _Allocator>
const bool _Alloc_traits<_Tp, _Allocator>::_S_instanceless;
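// Usage sketch (illustrative only): a container parameterized on an
// allocator _Alloc would typically write
//
//     typedef typename _Alloc_traits<_Tp, _Alloc>::allocator_type
//             allocator_type;                // always present
//     typedef typename _Alloc_traits<_Tp, _Alloc>::_Alloc_type
//             _Alloc_type;                   // only if _S_instanceless
//
// and, when _Alloc_traits<_Tp, _Alloc>::_S_instanceless is true, allocate
// through the static interface _Alloc_type::allocate(__n) /
// _Alloc_type::deallocate(__p, __n) instead of storing an allocator object.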
// The version for the default allocator.

template <class _Tp, class _Tp1>
struct _Alloc_traits<_Tp, allocator<_Tp1> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, alloc> _Alloc_type;
  typedef allocator<_Tp> allocator_type;
};

// Versions for the predefined SGI-style allocators.

template <class _Tp, int __inst>
struct _Alloc_traits<_Tp, __malloc_alloc_template<__inst> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
  typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
};

#ifndef __USE_MALLOC
template <class _Tp, bool __threads, int __inst>
struct _Alloc_traits<_Tp, __default_alloc_template<__threads, __inst> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, __default_alloc_template<__threads, __inst> >
          _Alloc_type;
  typedef __allocator<_Tp, __default_alloc_template<__threads, __inst> >
          allocator_type;
};
#endif

template <class _Tp, class _Alloc>
struct _Alloc_traits<_Tp, debug_alloc<_Alloc> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, debug_alloc<_Alloc> > _Alloc_type;
  typedef __allocator<_Tp, debug_alloc<_Alloc> > allocator_type;
};

// Versions for the __allocator adaptor used with the predefined
// SGI-style allocators.

template <class _Tp, class _Tp1, int __inst>
struct _Alloc_traits<_Tp,
                     __allocator<_Tp1, __malloc_alloc_template<__inst> > >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
  typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
};

#ifndef __USE_MALLOC
template <class _Tp, class _Tp1, bool __thr, int __inst>
struct _Alloc_traits<_Tp,
                     __allocator<_Tp1,
                                 __default_alloc_template<__thr, __inst> > >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, __default_alloc_template<__thr,__inst> >
          _Alloc_type;
  typedef __allocator<_Tp, __default_alloc_template<__thr,__inst> >
          allocator_type;
};
#endif

template <class _Tp, class _Tp1, class _Alloc>
struct _Alloc_traits<_Tp, __allocator<_Tp1, debug_alloc<_Alloc> > >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, debug_alloc<_Alloc> > _Alloc_type;
  typedef __allocator<_Tp, debug_alloc<_Alloc> > allocator_type;
};


#endif /* __STL_USE_STD_ALLOCATORS */

#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
#pragma reset woff 1174
#endif

__STL_END_NAMESPACE

#undef __PRIVATE

#endif /* __SGI_STL_INTERNAL_ALLOC_H */

// Local Variables:
// mode:C++
// End: