Skip to content

Commit ca33479

Browse files
committed
xen: add helpers for caching grant mapping pages
Instead of having similar helpers in multiple backend drivers, use common
helpers for caching pages allocated via gnttab_alloc_pages(). Make use of
those helpers in blkback and scsiback.

Cc: <[email protected]> # 5.9
Signed-off-by: Juergen Gross <[email protected]>
Reviewed-by: Boris Ostrovsky <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
1 parent a68a026 commit ca33479

File tree

6 files changed

+116
-128
lines changed

6 files changed

+116
-128
lines changed

drivers/block/xen-blkback/blkback.c

Lines changed: 17 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);
132132

133133
#define BLKBACK_INVALID_HANDLE (~0)
134134

135-
/* Number of free pages to remove on each call to gnttab_free_pages */
136-
#define NUM_BATCH_FREE_PAGES 10
137-
138135
static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
139136
{
140137
return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
141138
HZ * pgrant_timeout);
142139
}
143140

144-
/*
 * Take one page from the ring's pool of free grant pages, falling back to a
 * fresh gnttab allocation when the pool is empty.
 *
 * Returns 0 on success with page[0] set, or the gnttab_alloc_pages() error
 * code when the pool was empty and allocation failed.
 */
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	if (list_empty(&ring->free_pages)) {
		/* Counter must agree with the (empty) list. */
		BUG_ON(ring->free_pages_num != 0);
		/* Allocate outside the lock; nothing left to protect here. */
		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
		return gnttab_alloc_pages(1, page);
	}
	/* Non-empty list implies a non-zero counter. */
	BUG_ON(ring->free_pages_num == 0);
	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	ring->free_pages_num--;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);

	return 0;
}
162-
163-
/*
 * Return @num pages to the ring's pool of free grant pages.
 *
 * The pages are linked into the pool via their lru list heads; the pool
 * counter is updated under the same lock that guards the list.
 */
static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
                                  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &ring->free_pages);
	ring->free_pages_num += num;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}
175-
176-
/*
 * Shrink the ring's free-page pool down to at most @num pages, handing the
 * surplus back via gnttab_free_pages().
 *
 * Pages are collected into a small on-stack batch and freed with the pool
 * lock dropped; the loop condition is re-evaluated after the lock is
 * reacquired, so a pool refilled concurrently is handled correctly.
 */
static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	while (ring->free_pages_num > num) {
		/* A positive counter must mean a non-empty list. */
		BUG_ON(list_empty(&ring->free_pages));
		page[num_pages] = list_first_entry(&ring->free_pages,
		                                   struct page, lru);
		list_del(&page[num_pages]->lru);
		ring->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			/* Batch full: free it outside the lock. */
			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
			gnttab_free_pages(num_pages, page);
			spin_lock_irqsave(&ring->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
	/* Free any remaining partial batch. */
	if (num_pages != 0)
		gnttab_free_pages(num_pages, page);
}
201-
202141
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
203142

204143
static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
331270
unmap_data.count = segs_to_unmap;
332271
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
333272

334-
put_free_pages(ring, pages, segs_to_unmap);
273+
gnttab_page_cache_put(&ring->free_pages, pages,
274+
segs_to_unmap);
335275
segs_to_unmap = 0;
336276
}
337277

@@ -371,15 +311,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
371311
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
372312
unmap_data.count = segs_to_unmap;
373313
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
374-
put_free_pages(ring, pages, segs_to_unmap);
314+
gnttab_page_cache_put(&ring->free_pages, pages,
315+
segs_to_unmap);
375316
segs_to_unmap = 0;
376317
}
377318
kfree(persistent_gnt);
378319
}
379320
if (segs_to_unmap > 0) {
380321
unmap_data.count = segs_to_unmap;
381322
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
382-
put_free_pages(ring, pages, segs_to_unmap);
323+
gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
383324
}
384325
}
385326

@@ -664,9 +605,10 @@ int xen_blkif_schedule(void *arg)
664605

665606
/* Shrink the free pages pool if it is too large. */
666607
if (time_before(jiffies, blkif->buffer_squeeze_end))
667-
shrink_free_pagepool(ring, 0);
608+
gnttab_page_cache_shrink(&ring->free_pages, 0);
668609
else
669-
shrink_free_pagepool(ring, max_buffer_pages);
610+
gnttab_page_cache_shrink(&ring->free_pages,
611+
max_buffer_pages);
670612

671613
if (log_stats && time_after(jiffies, ring->st_print))
672614
print_stats(ring);
@@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
697639
ring->persistent_gnt_c = 0;
698640

699641
/* Since we are shutting down remove all pages from the buffer */
700-
shrink_free_pagepool(ring, 0 /* All */);
642+
gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
701643
}
702644

703645
static unsigned int xen_blkbk_unmap_prepare(
@@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
736678
but is this the best way to deal with this? */
737679
BUG_ON(result);
738680

739-
put_free_pages(ring, data->pages, data->count);
681+
gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
740682
make_response(ring, pending_req->id,
741683
pending_req->operation, pending_req->status);
742684
free_req(ring, pending_req);
@@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
803745
if (invcount) {
804746
ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
805747
BUG_ON(ret);
806-
put_free_pages(ring, unmap_pages, invcount);
748+
gnttab_page_cache_put(&ring->free_pages, unmap_pages,
749+
invcount);
807750
}
808751
pages += batch;
809752
num -= batch;
@@ -850,7 +793,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
850793
pages[i]->page = persistent_gnt->page;
851794
pages[i]->persistent_gnt = persistent_gnt;
852795
} else {
853-
if (get_free_page(ring, &pages[i]->page))
796+
if (gnttab_page_cache_get(&ring->free_pages,
797+
&pages[i]->page))
854798
goto out_of_memory;
855799
addr = vaddr(pages[i]->page);
856800
pages_to_gnt[segs_to_map] = pages[i]->page;
@@ -883,7 +827,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
883827
BUG_ON(new_map_idx >= segs_to_map);
884828
if (unlikely(map[new_map_idx].status != 0)) {
885829
pr_debug("invalid buffer -- could not remap it\n");
886-
put_free_pages(ring, &pages[seg_idx]->page, 1);
830+
gnttab_page_cache_put(&ring->free_pages,
831+
&pages[seg_idx]->page, 1);
887832
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
888833
ret |= 1;
889834
goto next;
@@ -944,7 +889,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
944889

945890
out_of_memory:
946891
pr_alert("%s: out of memory\n", __func__);
947-
put_free_pages(ring, pages_to_gnt, segs_to_map);
892+
gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
948893
for (i = last_map; i < num; i++)
949894
pages[i]->handle = BLKBACK_INVALID_HANDLE;
950895
return -ENOMEM;

drivers/block/xen-blkback/common.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -288,9 +288,7 @@ struct xen_blkif_ring {
288288
struct work_struct persistent_purge_work;
289289

290290
/* Buffer of free pages to map grant refs. */
291-
spinlock_t free_pages_lock;
292-
int free_pages_num;
293-
struct list_head free_pages;
291+
struct gnttab_page_cache free_pages;
294292

295293
struct work_struct free_work;
296294
/* Thread shutdown wait queue. */

drivers/block/xen-blkback/xenbus.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -144,8 +144,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
144144
INIT_LIST_HEAD(&ring->pending_free);
145145
INIT_LIST_HEAD(&ring->persistent_purge_list);
146146
INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
147-
spin_lock_init(&ring->free_pages_lock);
148-
INIT_LIST_HEAD(&ring->free_pages);
147+
gnttab_page_cache_init(&ring->free_pages);
149148

150149
spin_lock_init(&ring->pending_free_lock);
151150
init_waitqueue_head(&ring->pending_free_wq);
@@ -317,8 +316,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
317316
BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
318317
BUG_ON(!list_empty(&ring->persistent_purge_list));
319318
BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
320-
BUG_ON(!list_empty(&ring->free_pages));
321-
BUG_ON(ring->free_pages_num != 0);
319+
BUG_ON(ring->free_pages.num_pages != 0);
322320
BUG_ON(ring->persistent_gnt_c != 0);
323321
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
324322
ring->active = false;

drivers/xen/grant-table.c

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -813,6 +813,78 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
813813
}
814814
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
815815

816+
void gnttab_page_cache_init(struct gnttab_page_cache *cache)
817+
{
818+
spin_lock_init(&cache->lock);
819+
INIT_LIST_HEAD(&cache->pages);
820+
cache->num_pages = 0;
821+
}
822+
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
823+
824+
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
825+
{
826+
unsigned long flags;
827+
828+
spin_lock_irqsave(&cache->lock, flags);
829+
830+
if (list_empty(&cache->pages)) {
831+
spin_unlock_irqrestore(&cache->lock, flags);
832+
return gnttab_alloc_pages(1, page);
833+
}
834+
835+
page[0] = list_first_entry(&cache->pages, struct page, lru);
836+
list_del(&page[0]->lru);
837+
cache->num_pages--;
838+
839+
spin_unlock_irqrestore(&cache->lock, flags);
840+
841+
return 0;
842+
}
843+
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
844+
845+
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
846+
unsigned int num)
847+
{
848+
unsigned long flags;
849+
unsigned int i;
850+
851+
spin_lock_irqsave(&cache->lock, flags);
852+
853+
for (i = 0; i < num; i++)
854+
list_add(&page[i]->lru, &cache->pages);
855+
cache->num_pages += num;
856+
857+
spin_unlock_irqrestore(&cache->lock, flags);
858+
}
859+
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
860+
861+
/*
 * Shrink the cache down to at most @num pages, returning the surplus to the
 * grant table allocator via gnttab_free_pages().
 *
 * Surplus pages are collected into a small on-stack batch that is freed
 * with the cache lock dropped; the loop condition is re-checked after the
 * lock is reacquired, so concurrent refills are handled correctly.
 */
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	/* Batch buffer: pages are freed ARRAY_SIZE(page) at a time. */
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = list_first_entry(&cache->pages, struct page, lru);
		list_del(&page[i]->lru);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			/* Batch full: free it outside the lock. */
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	/* Free any remaining partial batch. */
	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
816888
void gnttab_pages_clear_private(int nr_pages, struct page **pages)
817889
{
818890
int i;

0 commit comments

Comments
 (0)