/*
 * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "Model.h"

#include <new>

#include <stdio.h>
#include <stdlib.h>

#include <AutoDeleter.h>

#include <thread_defs.h>


static const char* const kThreadStateNames[] = {
	"running",
	"still running",
	"preempted",
	"ready",
	"waiting",
	"unknown"
};


const char*
thread_state_name(ThreadState state)
{
	return kThreadStateNames[state];
}


const char*
wait_object_type_name(uint32 type)
{
	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
			return "semaphore";
		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
			return "condition";
		case THREAD_BLOCK_TYPE_MUTEX:
			return "mutex";
		case THREAD_BLOCK_TYPE_RW_LOCK:
			return "rw lock";
		case THREAD_BLOCK_TYPE_OTHER:
			return "other";
		case THREAD_BLOCK_TYPE_SNOOZE:
			return "snooze";
		case THREAD_BLOCK_TYPE_SIGNAL:
			return "signal";
		default:
			return "unknown";
	}
}


// #pragma mark - CPU


Model::CPU::CPU()
	:
	fIdleTime(0)
{
}


void
Model::CPU::SetIdleTime(nanotime_t time)
{
	fIdleTime = time;
}


// #pragma mark - IORequest


Model::IORequest::IORequest(
	system_profiler_io_request_scheduled* scheduledEvent,
	system_profiler_io_request_finished* finishedEvent, size_t operationCount)
	:
	scheduledEvent(scheduledEvent),
	finishedEvent(finishedEvent),
	operationCount(operationCount)
{
}


Model::IORequest::~IORequest()
{
}


/*static*/ Model::IORequest*
Model::IORequest::Create(system_profiler_io_request_scheduled* scheduledEvent,
	system_profiler_io_request_finished* finishedEvent, size_t operationCount)
{
	void* memory = malloc(
		sizeof(IORequest) + operationCount * sizeof(IOOperation));
	if (memory == NULL)
		return NULL;

	return new(memory) IORequest(scheduledEvent, finishedEvent, operationCount);
}


void
Model::IORequest::Delete()
{
	free(this);
}


// #pragma mark - IOScheduler


Model::IOScheduler::IOScheduler(system_profiler_io_scheduler_added* event,
	int32 index)
	:
	fAddedEvent(event),
	fIndex(index)
{
}


// #pragma mark - WaitObject


Model::WaitObject::WaitObject(const system_profiler_wait_object_info* event)
	:
	fEvent(event),
	fWaits(0),
	fTotalWaitTime(0)
{
}


Model::WaitObject::~WaitObject()
{
}


void
Model::WaitObject::AddWait(nanotime_t waitTime)
{
	fWaits++;
	fTotalWaitTime += waitTime;
}


// #pragma mark - WaitObjectGroup


Model::WaitObjectGroup::WaitObjectGroup(WaitObject* waitObject)
	:
	fWaits(-1),
	fTotalWaitTime(-1)
{
	fWaitObjects.AddItem(waitObject);
}


Model::WaitObjectGroup::~WaitObjectGroup()
{
}


int64
Model::WaitObjectGroup::Waits()
{
	if (fWaits < 0)
		_ComputeWaits();

	return fWaits;
}


nanotime_t
Model::WaitObjectGroup::TotalWaitTime()
{
	if (fTotalWaitTime < 0)
		_ComputeWaits();

	return fTotalWaitTime;
}


void
Model::WaitObjectGroup::_ComputeWaits()
{
	fWaits = 0;
	fTotalWaitTime = 0;

	for (int32 i = fWaitObjects.CountItems(); i-- > 0;) {
		WaitObject* waitObject = fWaitObjects.ItemAt(i);

		fWaits += waitObject->Waits();
		fTotalWaitTime += waitObject->TotalWaitTime();
	}
}


// #pragma mark - ThreadWaitObject


Model::ThreadWaitObject::ThreadWaitObject(WaitObject* waitObject)
	:
	fWaitObject(waitObject),
	fWaits(0),
	fTotalWaitTime(0)
{
}


Model::ThreadWaitObject::~ThreadWaitObject()
{
}


void
Model::ThreadWaitObject::AddWait(nanotime_t waitTime)
{
	fWaits++;
	fTotalWaitTime += waitTime;

	fWaitObject->AddWait(waitTime);
}


// #pragma mark - ThreadWaitObjectGroup


Model::ThreadWaitObjectGroup::ThreadWaitObjectGroup(
	ThreadWaitObject* threadWaitObject)
{
	fWaitObjects.Add(threadWaitObject);
}


Model::ThreadWaitObjectGroup::~ThreadWaitObjectGroup()
{
}


bool
Model::ThreadWaitObjectGroup::GetThreadWaitObjects(
	BObjectList<ThreadWaitObject>& objects)
{
	ThreadWaitObjectList::Iterator it = fWaitObjects.GetIterator();
	while (ThreadWaitObject* object = it.Next()) {
		if (!objects.AddItem(object))
			return false;
	}

	return true;
}


// #pragma mark - Team


Model::Team::Team(const system_profiler_team_added* event, nanotime_t time)
	:
	fCreationEvent(event),
	fCreationTime(time),
	fDeletionTime(-1),
	fThreads(10)
{
}


Model::Team::~Team()
{
}


bool
Model::Team::AddThread(Thread* thread)
{
	return fThreads.BinaryInsert(thread, &Thread::CompareByCreationTimeID);
}


// #pragma mark - Thread


Model::Thread::Thread(Team* team, const system_profiler_thread_added* event,
	nanotime_t time)
	:
	fEvents(NULL),
	fEventCount(0),
	fIORequests(NULL),
	fIORequestCount(0),
	fTeam(team),
	fCreationEvent(event),
	fCreationTime(time),
	fDeletionTime(-1),
	fRuns(0),
	fTotalRunTime(0),
	fMinRunTime(-1),
	fMaxRunTime(-1),
	fLatencies(0),
	fTotalLatency(0),
	fMinLatency(-1),
	fMaxLatency(-1),
	fReruns(0),
	fTotalRerunTime(0),
	fMinRerunTime(-1),
	fMaxRerunTime(-1),
	fWaits(0),
	fTotalWaitTime(0),
	fUnspecifiedWaitTime(0),
	fIOCount(0),
	fIOTime(0),
	fPreemptions(0),
	fIndex(-1),
	fWaitObjectGroups(20, true)
{
}


Model::Thread::~Thread()
{
	if (fIORequests != NULL) {
		for (size_t i = 0; i < fIORequestCount; i++)
			fIORequests[i]->Delete();

		delete[] fIORequests;
	}

	delete[] fEvents;
}


void
Model::Thread::SetEvents(system_profiler_event_header** events,
	size_t eventCount)
{
	fEvents = events;
	fEventCount = eventCount;
}


void
Model::Thread::SetIORequests(IORequest** requests, size_t requestCount)
{
	fIORequests = requests;
	fIORequestCount = requestCount;
}


size_t
Model::Thread::ClosestRequestStartIndex(nanotime_t minRequestStartTime) const
{
	size_t lower = 0;
	size_t upper = fIORequestCount;
	while (lower < upper) {
		size_t mid = (lower + upper) / 2;
		IORequest* request = fIORequests[mid];

		if (request->ScheduledTime() < minRequestStartTime)
			lower = mid + 1;
		else
			upper = mid;
	}

	return lower;
}


Model::ThreadWaitObjectGroup*
Model::Thread::ThreadWaitObjectGroupFor(uint32 type, addr_t object) const
{
	type_and_object key;
	key.type = type;
	key.object = object;

	return fWaitObjectGroups.BinarySearchByKey(key,
		&ThreadWaitObjectGroup::CompareWithTypeObject);
}


void
Model::Thread::AddRun(nanotime_t runTime)
{
	fRuns++;
	fTotalRunTime += runTime;

	if (fMinRunTime < 0 || runTime < fMinRunTime)
		fMinRunTime = runTime;
	if (runTime > fMaxRunTime)
		fMaxRunTime = runTime;
}


void
Model::Thread::AddRerun(nanotime_t runTime)
{
	fReruns++;
	fTotalRerunTime += runTime;

	if (fMinRerunTime < 0 || runTime < fMinRerunTime)
		fMinRerunTime = runTime;
	if (runTime > fMaxRerunTime)
		fMaxRerunTime = runTime;
}


void
Model::Thread::AddLatency(nanotime_t latency)
{
	fLatencies++;
	fTotalLatency += latency;

	if (fMinLatency < 0 || latency < fMinLatency)
		fMinLatency = latency;
	if (latency > fMaxLatency)
		fMaxLatency = latency;
}


void
Model::Thread::AddPreemption(nanotime_t runTime)
{
	fPreemptions++;
}


void
Model::Thread::AddWait(nanotime_t waitTime)
{
	fWaits++;
	fTotalWaitTime += waitTime;
}


void
Model::Thread::AddUnspecifiedWait(nanotime_t waitTime)
{
	fUnspecifiedWaitTime += waitTime;
}


Model::ThreadWaitObject*
Model::Thread::AddThreadWaitObject(WaitObject* waitObject,
	ThreadWaitObjectGroup** _threadWaitObjectGroup)
{
	// create a thread wait object
	ThreadWaitObject* threadWaitObject
		= new(std::nothrow) ThreadWaitObject(waitObject);
	if (threadWaitObject == NULL)
		return NULL;

	// find the thread wait object group
	ThreadWaitObjectGroup* threadWaitObjectGroup
		= ThreadWaitObjectGroupFor(waitObject->Type(), waitObject->Object());
	if (threadWaitObjectGroup == NULL) {
		// doesn't exist yet -- create
		threadWaitObjectGroup = new(std::nothrow) ThreadWaitObjectGroup(
			threadWaitObject);
		if (threadWaitObjectGroup == NULL) {
			delete threadWaitObject;
			return NULL;
		}

		// add to the list
		if (!fWaitObjectGroups.BinaryInsert(threadWaitObjectGroup,
				&ThreadWaitObjectGroup::CompareByTypeObject)) {
			delete threadWaitObjectGroup;
			return NULL;
		}
	} else {
		// exists -- just add the object
		threadWaitObjectGroup->AddWaitObject(threadWaitObject);
	}

	if (_threadWaitObjectGroup != NULL)
		*_threadWaitObjectGroup = threadWaitObjectGroup;

	return threadWaitObject;
}


void
Model::Thread::SetIOs(int64 count, nanotime_t time)
{
	fIOCount = count;
	fIOTime = time;
}


// #pragma mark - SchedulingState


Model::SchedulingState::~SchedulingState()
{
	Clear();
}


status_t
Model::SchedulingState::Init()
{
	status_t error = fThreadStates.Init();
	if (error != B_OK)
		return error;

	return B_OK;
}


status_t
Model::SchedulingState::Init(const CompactSchedulingState* state)
{
	status_t error = Init();
	if (error != B_OK)
		return error;

	if (state == NULL)
		return B_OK;

	fLastEventTime = state->LastEventTime();
	for (int32 i = 0; const CompactThreadSchedulingState* compactThreadState
			= state->ThreadStateAt(i); i++) {
		ThreadSchedulingState* threadState
			= new(std::nothrow) ThreadSchedulingState(*compactThreadState);
		if (threadState == NULL)
			return B_NO_MEMORY;

		fThreadStates.Insert(threadState);
	}

	return B_OK;
}


void
Model::SchedulingState::Clear()
{
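	// Note: passing "true" to Clear() makes the hash table hand back its
	// detached elements as a singly linked list (chained via "next"), which
	// the loop below walks to delete every thread state.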
	ThreadSchedulingState* state = fThreadStates.Clear(true);
	while (state != NULL) {
		ThreadSchedulingState* next = state->next;
		DeleteThread(state);
		state = next;
	}

	fLastEventTime = -1;
}


void
Model::SchedulingState::DeleteThread(ThreadSchedulingState* thread)
{
	delete thread;
}


// #pragma mark - CompactSchedulingState


/*static*/ Model::CompactSchedulingState*
Model::CompactSchedulingState::Create(const SchedulingState& state,
	off_t eventOffset)
{
	nanotime_t lastEventTime = state.LastEventTime();

	// count the active threads
	int32 threadCount = 0;
	for (ThreadSchedulingStateTable::Iterator it
			= state.ThreadStates().GetIterator();
		ThreadSchedulingState* threadState = it.Next();) {
		Thread* thread = threadState->thread;
		if (thread->CreationTime() <= lastEventTime
			&& (thread->DeletionTime() == -1
				|| thread->DeletionTime() >= lastEventTime)) {
			threadCount++;
		}
	}

	CompactSchedulingState* compactState = (CompactSchedulingState*)malloc(
		sizeof(CompactSchedulingState)
			+ threadCount * sizeof(CompactThreadSchedulingState));
	if (compactState == NULL)
		return NULL;

	// copy the state info
	compactState->fEventOffset = eventOffset;
	compactState->fThreadCount = threadCount;
	compactState->fLastEventTime = lastEventTime;

	int32 threadIndex = 0;
	for (ThreadSchedulingStateTable::Iterator it
			= state.ThreadStates().GetIterator();
		ThreadSchedulingState* threadState = it.Next();) {
		Thread* thread = threadState->thread;
		if (thread->CreationTime() <= lastEventTime
			&& (thread->DeletionTime() == -1
				|| thread->DeletionTime() >= lastEventTime)) {
			compactState->fThreadStates[threadIndex++] = *threadState;
		}
	}

	return compactState;
}


void
Model::CompactSchedulingState::Delete()
{
	free(this);
}


// #pragma mark - Model


Model::Model(const char* dataSourceName, void* eventData, size_t eventDataSize,
	system_profiler_event_header** events, size_t eventCount)
	:
	fDataSourceName(dataSourceName),
	fEventData(eventData),
	fEvents(events),
	fEventDataSize(eventDataSize),
	fEventCount(eventCount),
	fCPUCount(1),
	fBaseTime(0),
	fLastEventTime(0),
	fIdleTime(0),
	fCPUs(20, true),
	fTeams(20, true),
	fThreads(20, true),
	fWaitObjectGroups(20, true),
	fIOSchedulers(10, true),
	fSchedulingStates(100)
{
}


Model::~Model()
{
	for (int32 i = 0; CompactSchedulingState* state
			= fSchedulingStates.ItemAt(i); i++) {
		state->Delete();
	}

	delete[] fEvents;

	free(fEventData);

	for (int32 i = 0; void* data = fAssociatedData.ItemAt(i); i++)
		free(data);
}


size_t
Model::ClosestEventIndex(nanotime_t eventTime) const
{
	// The events themselves are unmodified and use an absolute time.
	eventTime += fBaseTime;

	// Binary search the event. Since not all events have a timestamp, we have
	// to do a bit of iteration, too.
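	// Only the scheduling-related events carry a usable "time" field, so
	// after picking a midpoint the inner loop below advances over events of
	// other types until it finds one it can compare (or hits the upper
	// bound).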
	size_t lower = 0;
	size_t upper = CountEvents();
	while (lower < upper) {
		size_t mid = (lower + upper) / 2;
		while (mid < upper) {
			system_profiler_event_header* header = fEvents[mid];
			switch (header->event) {
				case B_SYSTEM_PROFILER_THREAD_SCHEDULED:
				case B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE:
				case B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE:
					break;
				default:
					mid++;
					continue;
			}

			break;
		}

		if (mid == upper) {
			lower = mid;
			break;
		}

		system_profiler_thread_scheduling_event* event
			= (system_profiler_thread_scheduling_event*)(fEvents[mid] + 1);
		if (event->time < eventTime)
			lower = mid + 1;
		else
			upper = mid;
	}

	return lower;
}


bool
Model::AddAssociatedData(void* data)
{
	return fAssociatedData.AddItem(data);
}


void
Model::RemoveAssociatedData(void* data)
{
	fAssociatedData.RemoveItem(data);
}


void
Model::LoadingFinished()
{
	// set the thread indices
	for (int32 i = 0; Thread* thread = fThreads.ItemAt(i); i++)
		thread->SetIndex(i);

	// compute the total idle time
	fIdleTime = 0;
	for (int32 i = 0; CPU* cpu = CPUAt(i); i++)
		fIdleTime += cpu->IdleTime();
}


void
Model::SetBaseTime(nanotime_t time)
{
	fBaseTime = time;
}


void
Model::SetLastEventTime(nanotime_t time)
{
	fLastEventTime = time;
}


bool
Model::SetCPUCount(int32 count)
{
	fCPUCount = count;

	fCPUs.MakeEmpty();

	for (int32 i = 0; i < fCPUCount; i++) {
		CPU* cpu = new(std::nothrow) CPU;
		if (cpu == NULL || !fCPUs.AddItem(cpu)) {
			delete cpu;
			return false;
		}
	}

	return true;
}


int32
Model::CountTeams() const
{
	return fTeams.CountItems();
}


Model::Team*
Model::TeamAt(int32 index) const
{
	return fTeams.ItemAt(index);
}


Model::Team*
Model::TeamByID(team_id id) const
{
	return fTeams.BinarySearchByKey(id, &Team::CompareWithID);
}


Model::Team*
Model::AddTeam(const system_profiler_team_added* event, nanotime_t time)
{
	Team* team = TeamByID(event->team);
	if (team != NULL) {
		fprintf(stderr, "Duplicate team: %" B_PRId32 "\n", event->team);
		// TODO: User feedback!
		return team;
	}

	team = new(std::nothrow) Team(event, time);
	if (team == NULL)
		return NULL;

	if (!fTeams.BinaryInsert(team, &Team::CompareByID)) {
		delete team;
		return NULL;
	}

	return team;
}


int32
Model::CountThreads() const
{
	return fThreads.CountItems();
}


Model::Thread*
Model::ThreadAt(int32 index) const
{
	return fThreads.ItemAt(index);
}


Model::Thread*
Model::ThreadByID(thread_id id) const
{
	return fThreads.BinarySearchByKey(id, &Thread::CompareWithID);
}


Model::Thread*
Model::AddThread(const system_profiler_thread_added* event, nanotime_t time)
{
	// check whether we already know the thread
	Thread* thread = ThreadByID(event->thread);
	if (thread != NULL) {
		fprintf(stderr, "Duplicate thread: %" B_PRId32 "\n", event->thread);
		// TODO: User feedback!
		return thread;
	}

	// get its team
	Team* team = TeamByID(event->team);
	if (team == NULL) {
		fprintf(stderr, "No team for thread: %" B_PRId32 "\n", event->thread);
		return NULL;
	}

	// create the thread and add it
	thread = new(std::nothrow) Thread(team, event, time);
	if (thread == NULL)
		return NULL;
	ObjectDeleter<Thread> threadDeleter(thread);

	if (!fThreads.BinaryInsert(thread, &Thread::CompareByID))
		return NULL;

	if (!team->AddThread(thread)) {
		fThreads.RemoveItem(thread);
		return NULL;
	}

	threadDeleter.Detach();
	return thread;
}


Model::WaitObject*
Model::AddWaitObject(const system_profiler_wait_object_info* event,
	WaitObjectGroup** _waitObjectGroup)
{
	// create a wait object
	WaitObject* waitObject = new(std::nothrow) WaitObject(event);
	if (waitObject == NULL)
		return NULL;

	// find the wait object group
	WaitObjectGroup* waitObjectGroup
		= WaitObjectGroupFor(waitObject->Type(), waitObject->Object());
	if (waitObjectGroup == NULL) {
		// doesn't exist yet -- create
		waitObjectGroup = new(std::nothrow) WaitObjectGroup(waitObject);
		if (waitObjectGroup == NULL) {
			delete waitObject;
			return NULL;
		}

		// add to the list
		if (!fWaitObjectGroups.BinaryInsert(waitObjectGroup,
				&WaitObjectGroup::CompareByTypeObject)) {
			delete waitObjectGroup;
			return NULL;
		}
	} else {
		// exists -- just add the object
		waitObjectGroup->AddWaitObject(waitObject);
	}

	if (_waitObjectGroup != NULL)
		*_waitObjectGroup = waitObjectGroup;

	return waitObject;
}


int32
Model::CountWaitObjectGroups() const
{
	return fWaitObjectGroups.CountItems();
}


Model::WaitObjectGroup*
Model::WaitObjectGroupAt(int32 index) const
{
	return fWaitObjectGroups.ItemAt(index);
}


Model::WaitObjectGroup*
Model::WaitObjectGroupFor(uint32 type, addr_t object) const
{
	type_and_object key;
	key.type = type;
	key.object = object;

	return fWaitObjectGroups.BinarySearchByKey(key,
		&WaitObjectGroup::CompareWithTypeObject);
}


Model::ThreadWaitObject*
Model::AddThreadWaitObject(thread_id threadID, WaitObject* waitObject,
	ThreadWaitObjectGroup** _threadWaitObjectGroup)
{
	Thread* thread = ThreadByID(threadID);
	if (thread == NULL)
		return NULL;

	return thread->AddThreadWaitObject(waitObject, _threadWaitObjectGroup);
}


Model::ThreadWaitObjectGroup*
Model::ThreadWaitObjectGroupFor(thread_id threadID, uint32 type,
	addr_t object) const
{
	Thread* thread = ThreadByID(threadID);
	if (thread == NULL)
		return NULL;

	return thread->ThreadWaitObjectGroupFor(type, object);
}


int32
Model::CountIOSchedulers() const
{
	return fIOSchedulers.CountItems();
}


Model::IOScheduler*
Model::IOSchedulerAt(int32 index) const
{
	return fIOSchedulers.ItemAt(index);
}


Model::IOScheduler*
Model::IOSchedulerByID(int32 id) const
{
	for (int32 i = 0; IOScheduler* scheduler = fIOSchedulers.ItemAt(i); i++) {
		if (scheduler->ID() == id)
			return scheduler;
	}

	return NULL;
}


Model::IOScheduler*
Model::AddIOScheduler(system_profiler_io_scheduler_added* event)
{
	IOScheduler* scheduler = new(std::nothrow) IOScheduler(event,
		fIOSchedulers.CountItems());
	if (scheduler == NULL || !fIOSchedulers.AddItem(scheduler)) {
		delete scheduler;
		return NULL;
	}

	return scheduler;
}


bool
Model::AddSchedulingStateSnapshot(const SchedulingState& state,
	off_t eventOffset)
{
	CompactSchedulingState* compactState = CompactSchedulingState::Create(state,
		eventOffset);
	if (compactState == NULL)
		return false;

	if (!fSchedulingStates.AddItem(compactState)) {
		compactState->Delete();
		return false;
	}

	return true;
}


const Model::CompactSchedulingState*
Model::ClosestSchedulingState(nanotime_t eventTime) const
{
	int32 index = fSchedulingStates.BinarySearchIndexByKey(eventTime,
		&_CompareEventTimeSchedulingState);
	if (index >= 0)
		return fSchedulingStates.ItemAt(index);

	// no exact match
	index = -index - 1;
	return index > 0 ? fSchedulingStates.ItemAt(index - 1) : NULL;
}


/*static*/ int
Model::_CompareEventTimeSchedulingState(const nanotime_t* time,
	const CompactSchedulingState* state)
{
	if (*time < state->LastEventTime())
		return -1;
	return *time == state->LastEventTime() ? 0 : 1;
}