+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Huge page-table-entry support for IO memory.
+ *
+ * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
+ */
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+
+/**
+ * struct vmw_thp_manager - Range manager implementing huge page alignment
+ *
+ * @mm: The underlying range manager. Protected by @lock.
+ * @lock: Manager lock.
+ */
+struct vmw_thp_manager {
+	struct drm_mm mm;
+	spinlock_t lock;
+};
+
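+/*
+ * Attempt an insertion at @align_pages alignment, but only when that
+ * huge-page alignment also satisfies the alignment the buffer itself
+ * requires; otherwise give up with -ENOSPC so the caller can fall
+ * back to a smaller alignment.
+ */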
+static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
+				  unsigned long align_pages,
+				  const struct ttm_place *place,
+				  struct ttm_mem_reg *mem,
+				  unsigned long lpfn,
+				  enum drm_mm_insert_mode mode)
+{
+	if (align_pages >= mem->page_alignment &&
+	    (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
+		return drm_mm_insert_node_in_range(mm, node,
+						   mem->num_pages,
+						   align_pages, 0,
+						   place->fpfn, lpfn, mode);
+	}
+
+	return -ENOSPC;
+}
+
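+/*
+ * Allocate a range-manager node for @bo, trying the largest huge-page
+ * alignment first: PUD size where the architecture supports it, then
+ * PMD size, and finally the plain alignment the buffer requires.
+ */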
+static int vmw_thp_get_node(struct ttm_mem_type_manager *man,
+			    struct ttm_buffer_object *bo,
+			    const struct ttm_place *place,
+			    struct ttm_mem_reg *mem)
+{
+	struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
+	struct drm_mm_node *node;
+	unsigned long align_pages;
+	unsigned long lpfn;
+	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
+	int ret;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	lpfn = place->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		mode = DRM_MM_INSERT_HIGH;
+
+	spin_lock(&rman->lock);
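+	/*
+	 * Try PUD-size alignment first, on architectures with PUD
+	 * transparent huge pages and only if the BO is large enough.
+	 */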
+	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
+		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
+		if (mem->num_pages >= align_pages) {
+			ret = vmw_thp_insert_aligned(mm, node, align_pages,
+						     place, mem, lpfn, mode);
+			if (!ret)
+				goto found_unlock;
+		}
+	}
+
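+	/* Next, try the smaller PMD-size alignment. */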
+	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
+	if (mem->num_pages >= align_pages) {
+		ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
+					     lpfn, mode);
+		if (!ret)
+			goto found_unlock;
+	}
+
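+	/* Finally, fall back to the alignment the buffer itself requires. */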
+	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+					  mem->page_alignment, 0,
+					  place->fpfn, lpfn, mode);
+found_unlock:
+	spin_unlock(&rman->lock);
+
+	if (unlikely(ret)) {
+		kfree(node);
+	} else {
+		mem->mm_node = node;
+		mem->start = node->start;
+	}
+
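+	/*
+	 * Per TTM convention, failing to find space is not an error:
+	 * return 0 with mem->mm_node left NULL so the caller can retry
+	 * after evicting other buffers.
+	 */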
+	return 0;
+}
+
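+/*
+ * Free a node allocated by vmw_thp_get_node(): remove it from the
+ * range manager under the lock, then free it.
+ */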
+static void vmw_thp_put_node(struct ttm_mem_type_manager *man,
+			     struct ttm_mem_reg *mem)
+{
+	struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+
+	if (mem->mm_node) {
+		spin_lock(&rman->lock);
+		drm_mm_remove_node(mem->mm_node);
+		spin_unlock(&rman->lock);
+
+		kfree(mem->mm_node);
+		mem->mm_node = NULL;
+	}
+}
+
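+/*
+ * Set up the manager: one drm_mm range allocator spanning @p_size
+ * pages, plus the spinlock protecting it.
+ */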
+static int vmw_thp_init(struct ttm_mem_type_manager *man,
+			unsigned long p_size)
+{
+	struct vmw_thp_manager *rman;
+
+	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+	if (!rman)
+		return -ENOMEM;
+
+	drm_mm_init(&rman->mm, 0, p_size);
+	spin_lock_init(&rman->lock);
+	man->priv = rman;
+	return 0;
+}
+
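+/*
+ * Tear down the manager, refusing with -EBUSY while any allocations
+ * are still outstanding.
+ */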
+static int vmw_thp_takedown(struct ttm_mem_type_manager *man)
+{
+	struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
+
+	spin_lock(&rman->lock);
+	if (drm_mm_clean(mm)) {
+		drm_mm_takedown(mm);
+		spin_unlock(&rman->lock);
+		kfree(rman);
+		man->priv = NULL;
+		return 0;
+	}
+	spin_unlock(&rman->lock);
+	return -EBUSY;
+}
+
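+/* Dump the range-manager state through the supplied printer. */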
+static void vmw_thp_debug(struct ttm_mem_type_manager *man,
+			  struct drm_printer *printer)
+{
+	struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+
+	spin_lock(&rman->lock);
+	drm_mm_print(&rman->mm, printer);
+	spin_unlock(&rman->lock);
+}
+
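+/* TTM memory-type manager callbacks implementing huge-page-aligned allocation. */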
+const struct ttm_mem_type_manager_func vmw_thp_func = {
+	.init = vmw_thp_init,
+	.takedown = vmw_thp_takedown,
+	.get_node = vmw_thp_get_node,
+	.put_node = vmw_thp_put_node,
+	.debug = vmw_thp_debug
+};