@@ -1509,6 +1509,7 @@ hwloc__find_insert_memory_parent(struct hwloc_topology *topology, hwloc_obj_t ob
   return group;
 }
 
+/* only works for MEMCACHE and NUMAnode with a single bit in nodeset */
 static hwloc_obj_t
 hwloc___attach_memory_object_by_nodeset(struct hwloc_topology *topology, hwloc_obj_t parent,
                                         hwloc_obj_t obj,
@@ -1533,19 +1534,47 @@ hwloc___attach_memory_object_by_nodeset(struct hwloc_topology *topology, hwloc_o
 
     if (first == curfirst) {
       /* identical nodeset */
-      assert(obj->type == HWLOC_OBJ_NUMANODE);
-      assert(cur->type == HWLOC_OBJ_NUMANODE);
-      /* identical NUMA nodes? ignore the new one */
-      if (report_error) {
-        char curstr[512];
-        char objstr[512];
-        char msg[1100];
-        hwloc__report_error_format_obj(curstr, sizeof(curstr), cur);
-        hwloc__report_error_format_obj(objstr, sizeof(objstr), obj);
-        snprintf(msg, sizeof(msg), "%s and %s have identical nodesets!", objstr, curstr);
-        report_error(msg, __LINE__);
+      if (obj->type == HWLOC_OBJ_NUMANODE) {
+        if (cur->type == HWLOC_OBJ_NUMANODE) {
+          /* identical NUMA nodes? ignore the new one */
+          if (report_error) {
+            char curstr[512];
+            char objstr[512];
+            char msg[1100];
+            hwloc__report_error_format_obj(curstr, sizeof(curstr), cur);
+            hwloc__report_error_format_obj(objstr, sizeof(objstr), obj);
+            snprintf(msg, sizeof(msg), "%s and %s have identical nodesets!", objstr, curstr);
+            report_error(msg, __LINE__);
+          }
+          return NULL;
+        }
+        assert(cur->type == HWLOC_OBJ_MEMCACHE);
+        /* insert the new NUMA node below that existing memcache */
+        return hwloc___attach_memory_object_by_nodeset(topology, cur, obj, report_error);
+
+      } else {
+        assert(obj->type == HWLOC_OBJ_MEMCACHE);
+        if (cur->type == HWLOC_OBJ_MEMCACHE) {
+          if (cur->attr->cache.depth == obj->attr->cache.depth)
+            /* memcache with same nodeset and depth, ignore the new one */
+            return NULL;
+          if (cur->attr->cache.depth > obj->attr->cache.depth)
+            /* memcache with higher cache depth is actually *higher* in the hierarchy
+             * (depth starts from the NUMA node).
+             * insert the new memcache below the existing one
+             */
+            return hwloc___attach_memory_object_by_nodeset(topology, cur, obj, report_error);
+        }
+        /* insert the memcache above the existing memcache or numa node */
+        obj->next_sibling = cur->next_sibling;
+        cur->next_sibling = NULL;
+        obj->memory_first_child = cur;
+        cur->parent = obj;
+        *curp = obj;
+        obj->parent = parent;
+        topology->modified = 1;
+        return obj;
       }
-      return NULL;
     }
 
     curp = &cur->next_sibling;
@@ -1587,6 +1616,8 @@ hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
   } else if (!hwloc_bitmap_isincluded(obj->nodeset, obj->complete_nodeset)) {
     return NULL;
   }
+  /* Neither ACPI nor Linux support multinode mscache */
+  assert(hwloc_bitmap_weight(obj->nodeset) == 1);
 
 #if 0
   /* TODO: enable this instead of hack in fixup_sets once NUMA nodes are inserted late */
@@ -1599,9 +1630,6 @@ hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
   hwloc_bitmap_copy(obj->complete_cpuset, parent->complete_cpuset);
 #endif
 
-  /* only NUMA nodes are memory for now, just append to the end of the list */
-  assert(obj->type == HWLOC_OBJ_NUMANODE);
-
   result = hwloc___attach_memory_object_by_nodeset(topology, parent, obj, report_error);
   if (result == obj) {
     /* Add the bit to the top sets, and to the parent CPU-side object */
0 commit comments