  * the applicable attributes with the node's interfaces.
  */
 
+#define pr_fmt(fmt) "acpi/hmat: " fmt
+#define dev_fmt(fmt) "acpi/hmat: " fmt
+
 #include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
 #include <linux/list_sort.h>
+#include <linux/memregion.h>
 #include <linux/memory.h>
 #include <linux/mutex.h>
 #include <linux/node.h>
@@ -49,6 +55,7 @@ struct memory_target {
         struct list_head node;
         unsigned int memory_pxm;
         unsigned int processor_pxm;
+        struct resource memregions;
         struct node_hmem_attrs hmem_attrs;
         struct list_head caches;
         struct node_cache_attrs cache_attrs;
@@ -104,22 +111,36 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
         list_add_tail(&initiator->node, &initiators);
 }
 
-static __init void alloc_memory_target(unsigned int mem_pxm)
+static __init void alloc_memory_target(unsigned int mem_pxm,
+                resource_size_t start, resource_size_t len)
 {
         struct memory_target *target;
 
         target = find_mem_target(mem_pxm);
-        if (target)
-                return;
-
-        target = kzalloc(sizeof(*target), GFP_KERNEL);
-        if (!target)
-                return;
+        if (!target) {
+                target = kzalloc(sizeof(*target), GFP_KERNEL);
+                if (!target)
+                        return;
+                target->memory_pxm = mem_pxm;
+                target->processor_pxm = PXM_INVAL;
+                target->memregions = (struct resource) {
+                        .name = "ACPI mem",
+                        .start = 0,
+                        .end = -1,
+                        .flags = IORESOURCE_MEM,
+                };
+                list_add_tail(&target->node, &targets);
+                INIT_LIST_HEAD(&target->caches);
+        }
 
-        target->memory_pxm = mem_pxm;
-        target->processor_pxm = PXM_INVAL;
-        list_add_tail(&target->node, &targets);
-        INIT_LIST_HEAD(&target->caches);
+        /*
+         * There are potentially multiple ranges per PXM, so record each
+         * in the per-target memregions resource tree.
+         */
+        if (!__request_region(&target->memregions, start, len, "memory target",
+                              IORESOURCE_MEM))
+                pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
+                        start, start + len, mem_pxm);
 }
 
 static __init const char *hmat_data_type(u8 type)
@@ -452,7 +473,7 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
                 return -EINVAL;
         if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
                 return 0;
-        alloc_memory_target(ma->proximity_domain);
+        alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
         return 0;
 }
 
@@ -613,10 +634,91 @@ static void hmat_register_target_perf(struct memory_target *target)
         node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
 }
 
+static void hmat_register_target_device(struct memory_target *target,
+                struct resource *r)
+{
+        /* define a clean / non-busy resource for the platform device */
+        struct resource res = {
+                .start = r->start,
+                .end = r->end,
+                .flags = IORESOURCE_MEM,
+        };
+        struct platform_device *pdev;
+        struct memregion_info info;
+        int rc, id;
+
+        rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
+                        IORES_DESC_SOFT_RESERVED);
+        if (rc != REGION_INTERSECTS)
+                return;
+
+        id = memregion_alloc(GFP_KERNEL);
+        if (id < 0) {
+                pr_err("memregion allocation failure for %pr\n", &res);
+                return;
+        }
+
+        pdev = platform_device_alloc("hmem", id);
+        if (!pdev) {
+                pr_err("hmem device allocation failure for %pr\n", &res);
+                goto out_pdev;
+        }
+
+        pdev->dev.numa_node = acpi_map_pxm_to_online_node(target->memory_pxm);
+        info = (struct memregion_info) {
+                .target_node = acpi_map_pxm_to_node(target->memory_pxm),
+        };
+        rc = platform_device_add_data(pdev, &info, sizeof(info));
+        if (rc < 0) {
+                pr_err("hmem memregion_info allocation failure for %pr\n", &res);
+                goto out_pdev;
+        }
+
+        rc = platform_device_add_resources(pdev, &res, 1);
+        if (rc < 0) {
+                pr_err("hmem resource allocation failure for %pr\n", &res);
+                goto out_resource;
+        }
+
+        rc = platform_device_add(pdev);
+        if (rc < 0) {
+                dev_err(&pdev->dev, "device add failed for %pr\n", &res);
+                goto out_resource;
+        }
+
+        return;
+
+out_resource:
+        put_device(&pdev->dev);
+out_pdev:
+        memregion_free(id);
+}
+
+static __init void hmat_register_target_devices(struct memory_target *target)
+{
+        struct resource *res;
+
+        /*
+         * Do not bother creating devices if no driver is available to
+         * consume them.
+         */
+        if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
+                return;
+
+        for (res = target->memregions.child; res; res = res->sibling)
+                hmat_register_target_device(target, res);
+}
+
 static void hmat_register_target(struct memory_target *target)
 {
         int nid = pxm_to_node(target->memory_pxm);
 
+        /*
+         * Devices may belong to either an offline or online
+         * node, so unconditionally add them.
+         */
+        hmat_register_target_devices(target);
+
         /*
          * Skip offline nodes. This can happen when memory
          * marked EFI_MEMORY_SP, "specific purpose", is applied
@@ -677,11 +779,21 @@ static __init void hmat_free_structures(void)
         struct target_cache *tcache, *cnext;
 
         list_for_each_entry_safe(target, tnext, &targets, node) {
+                struct resource *res, *res_next;
+
                 list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
                         list_del(&tcache->node);
                         kfree(tcache);
                 }
+
                 list_del(&target->node);
+                res = target->memregions.child;
+                while (res) {
+                        res_next = res->sibling;
+                        __release_region(&target->memregions, res->start,
+                                        resource_size(res));
+                        res = res_next;
+                }
                 kfree(target);
         }
 
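
Note on the consumer side (not part of the diff above): hmat_register_target_devices() only creates the "hmem" platform devices when CONFIG_DEV_DAX_HMEM is enabled, i.e. when the dax_hmem driver is available to consume them. The listing below is a hypothetical, minimal consumer sketch, not the actual drivers/dax/hmem.c; it only illustrates the contract this patch establishes: the device name is "hmem", the platform data is a struct memregion_info carrying the target node, and resource 0 is the soft-reserved memory range.

/*
 * Hypothetical, minimal consumer of the "hmem" platform devices registered
 * by hmat_register_target_device() above. Not drivers/dax/hmem.c.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/memregion.h>

static int hmem_sketch_probe(struct platform_device *pdev)
{
        struct memregion_info *info = dev_get_platdata(&pdev->dev);
        struct resource *res;

        if (!info)
                return -ENXIO;

        /* resource 0 is the range attached via platform_device_add_resources() */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENXIO;

        dev_info(&pdev->dev, "target node %d, range %pr\n",
                 info->target_node, res);

        /* a real driver would create a device-dax instance for 'res' here */
        return 0;
}

static struct platform_driver hmem_sketch_driver = {
        .probe = hmem_sketch_probe,
        .driver = {
                .name = "hmem",
        },
};
module_platform_driver(hmem_sketch_driver);
MODULE_LICENSE("GPL v2");

A real consumer would go on to publish the range as a device-dax instance (which can in turn be handed to the kmem driver to hotplug the memory) rather than just logging it.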