|
4 | 4 | */
|
5 | 5 |
|
6 | 6 | #define _GNU_SOURCE
|
| 7 | +#ifdef HAVE_LIBCPUPOWER_SUPPORT |
| 8 | +#include <cpuidle.h> |
| 9 | +#endif /* HAVE_LIBCPUPOWER_SUPPORT */ |
7 | 10 | #include <dirent.h>
|
8 | 11 | #include <stdarg.h>
|
9 | 12 | #include <stdlib.h>
|
@@ -515,6 +518,153 @@ int set_cpu_dma_latency(int32_t latency)
|
515 | 518 | return fd;
|
516 | 519 | }
|
517 | 520 |
|
#ifdef HAVE_LIBCPUPOWER_SUPPORT
/*
 * Lazily-allocated per-CPU table of saved idle-state "disable" flags:
 * saved_cpu_idle_disable_state[cpu][state] holds the disable value captured
 * by save_cpu_idle_disable_state() and replayed by
 * restore_cpu_idle_disable_state(). Rows are indexed by CPU number; the
 * outer array is sized from sysconf(_SC_NPROCESSORS_CONF) on first use.
 */
static unsigned int **saved_cpu_idle_disable_state;
/*
 * Number of per-CPU rows currently allocated; when the last row is restored
 * and freed, the outer table itself is released.
 */
static size_t saved_cpu_idle_disable_state_alloc_ctr;

| 525 | +/* |
| 526 | + * save_cpu_idle_state_disable - save disable for all idle states of a cpu |
| 527 | + * |
| 528 | + * Saves the current disable of all idle states of a cpu, to be subsequently |
| 529 | + * restored via restore_cpu_idle_disable_state. |
| 530 | + * |
| 531 | + * Return: idle state count on success, negative on error |
| 532 | + */ |
| 533 | +int save_cpu_idle_disable_state(unsigned int cpu) |
| 534 | +{ |
| 535 | + unsigned int nr_states; |
| 536 | + unsigned int state; |
| 537 | + int disabled; |
| 538 | + int nr_cpus; |
| 539 | + |
| 540 | + nr_states = cpuidle_state_count(cpu); |
| 541 | + |
| 542 | + if (nr_states == 0) |
| 543 | + return 0; |
| 544 | + |
| 545 | + if (saved_cpu_idle_disable_state == NULL) { |
| 546 | + nr_cpus = sysconf(_SC_NPROCESSORS_CONF); |
| 547 | + saved_cpu_idle_disable_state = calloc(nr_cpus, sizeof(unsigned int *)); |
| 548 | + if (!saved_cpu_idle_disable_state) |
| 549 | + return -1; |
| 550 | + } |
| 551 | + |
| 552 | + saved_cpu_idle_disable_state[cpu] = calloc(nr_states, sizeof(unsigned int)); |
| 553 | + if (!saved_cpu_idle_disable_state[cpu]) |
| 554 | + return -1; |
| 555 | + saved_cpu_idle_disable_state_alloc_ctr++; |
| 556 | + |
| 557 | + for (state = 0; state < nr_states; state++) { |
| 558 | + disabled = cpuidle_is_state_disabled(cpu, state); |
| 559 | + if (disabled < 0) |
| 560 | + return disabled; |
| 561 | + saved_cpu_idle_disable_state[cpu][state] = disabled; |
| 562 | + } |
| 563 | + |
| 564 | + return nr_states; |
| 565 | +} |
| 566 | + |
| 567 | +/* |
| 568 | + * restore_cpu_idle_disable_state - restore disable for all idle states of a cpu |
| 569 | + * |
| 570 | + * Restores the current disable state of all idle states of a cpu that was |
| 571 | + * previously saved by save_cpu_idle_disable_state. |
| 572 | + * |
| 573 | + * Return: idle state count on success, negative on error |
| 574 | + */ |
| 575 | +int restore_cpu_idle_disable_state(unsigned int cpu) |
| 576 | +{ |
| 577 | + unsigned int nr_states; |
| 578 | + unsigned int state; |
| 579 | + int disabled; |
| 580 | + int result; |
| 581 | + |
| 582 | + nr_states = cpuidle_state_count(cpu); |
| 583 | + |
| 584 | + if (nr_states == 0) |
| 585 | + return 0; |
| 586 | + |
| 587 | + if (!saved_cpu_idle_disable_state) |
| 588 | + return -1; |
| 589 | + |
| 590 | + for (state = 0; state < nr_states; state++) { |
| 591 | + if (!saved_cpu_idle_disable_state[cpu]) |
| 592 | + return -1; |
| 593 | + disabled = saved_cpu_idle_disable_state[cpu][state]; |
| 594 | + result = cpuidle_state_disable(cpu, state, disabled); |
| 595 | + if (result < 0) |
| 596 | + return result; |
| 597 | + } |
| 598 | + |
| 599 | + free(saved_cpu_idle_disable_state[cpu]); |
| 600 | + saved_cpu_idle_disable_state[cpu] = NULL; |
| 601 | + saved_cpu_idle_disable_state_alloc_ctr--; |
| 602 | + if (saved_cpu_idle_disable_state_alloc_ctr == 0) { |
| 603 | + free(saved_cpu_idle_disable_state); |
| 604 | + saved_cpu_idle_disable_state = NULL; |
| 605 | + } |
| 606 | + |
| 607 | + return nr_states; |
| 608 | +} |
| 609 | + |
| 610 | +/* |
| 611 | + * free_cpu_idle_disable_states - free saved idle state disable for all cpus |
| 612 | + * |
| 613 | + * Frees the memory used for storing cpu idle state disable for all cpus |
| 614 | + * and states. |
| 615 | + * |
| 616 | + * Normally, the memory is freed automatically in |
| 617 | + * restore_cpu_idle_disable_state; this is mostly for cleaning up after an |
| 618 | + * error. |
| 619 | + */ |
| 620 | +void free_cpu_idle_disable_states(void) |
| 621 | +{ |
| 622 | + int cpu; |
| 623 | + int nr_cpus; |
| 624 | + |
| 625 | + if (!saved_cpu_idle_disable_state) |
| 626 | + return; |
| 627 | + |
| 628 | + nr_cpus = sysconf(_SC_NPROCESSORS_CONF); |
| 629 | + |
| 630 | + for (cpu = 0; cpu < nr_cpus; cpu++) { |
| 631 | + free(saved_cpu_idle_disable_state[cpu]); |
| 632 | + saved_cpu_idle_disable_state[cpu] = NULL; |
| 633 | + } |
| 634 | + |
| 635 | + free(saved_cpu_idle_disable_state); |
| 636 | + saved_cpu_idle_disable_state = NULL; |
| 637 | +} |
| 638 | + |
/*
 * set_deepest_cpu_idle_state - limit idle state of cpu
 *
 * Disables all idle states deeper than the one given in
 * deepest_state (assuming states with higher number are deeper).
 *
 * This is used to reduce the exit from idle latency. Unlike
 * set_cpu_dma_latency, it can disable idle states per cpu.
 *
 * Return: idle state count on success, negative on error
 */
int set_deepest_cpu_idle_state(unsigned int cpu, unsigned int deepest_state)
{
	unsigned int state_count = cpuidle_state_count(cpu);
	unsigned int idx = deepest_state + 1;
	int ret;

	/* Walk every state deeper than the allowed one and switch it off. */
	while (idx < state_count) {
		ret = cpuidle_state_disable(cpu, idx, 1);
		if (ret < 0)
			return ret;
		idx++;
	}

	return state_count;
}

| 666 | +#endif /* HAVE_LIBCPUPOWER_SUPPORT */ |
| 667 | + |
518 | 668 | #define _STR(x) #x
|
519 | 669 | #define STR(x) _STR(x)
|
520 | 670 |
|
|
0 commit comments