@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);
 
 #define BLKBACK_INVALID_HANDLE (~0)
 
-/* Number of free pages to remove on each call to gnttab_free_pages */
-#define NUM_BATCH_FREE_PAGES 10
-
 static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
 {
 	return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
 			HZ * pgrant_timeout);
 }
 
-static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ring->free_pages_lock, flags);
-	if (list_empty(&ring->free_pages)) {
-		BUG_ON(ring->free_pages_num != 0);
-		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-		return gnttab_alloc_pages(1, page);
-	}
-	BUG_ON(ring->free_pages_num == 0);
-	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
-	list_del(&page[0]->lru);
-	ring->free_pages_num--;
-	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-
-	return 0;
-}
-
-static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
-				  int num)
-{
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&ring->free_pages_lock, flags);
-	for (i = 0; i < num; i++)
-		list_add(&page[i]->lru, &ring->free_pages);
-	ring->free_pages_num += num;
-	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-}
-
-static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
-{
-	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
-	struct page *page[NUM_BATCH_FREE_PAGES];
-	unsigned int num_pages = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ring->free_pages_lock, flags);
-	while (ring->free_pages_num > num) {
-		BUG_ON(list_empty(&ring->free_pages));
-		page[num_pages] = list_first_entry(&ring->free_pages,
-						   struct page, lru);
-		list_del(&page[num_pages]->lru);
-		ring->free_pages_num--;
-		if (++num_pages == NUM_BATCH_FREE_PAGES) {
-			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-			gnttab_free_pages(num_pages, page);
-			spin_lock_irqsave(&ring->free_pages_lock, flags);
-			num_pages = 0;
-		}
-	}
-	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-	if (num_pages != 0)
-		gnttab_free_pages(num_pages, page);
-}
-
 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
 
 static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
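The helpers removed above (get_free_page(), put_free_pages(), shrink_free_pagepool()) are replaced throughout the hunks below by the generic grant-table page cache embedded in the ring as ring->free_pages. A minimal sketch of that interface follows, roughly as declared in include/xen/grant_table.h; the exact prototypes are assumed from how the calls appear in this diff, and gnttab_page_cache_init() comes from the same grant-table series but is not shown in this excerpt.

/* Per-user cache of grant pages; its layout is opaque to callers here. */
struct gnttab_page_cache;

void gnttab_page_cache_init(struct gnttab_page_cache *cache);
/* Returns 0 on success; falls back to gnttab_alloc_pages() when the cache is empty. */
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
/* Returns num pages to the cache instead of freeing them. */
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num);
/* Frees cached pages until at most num remain. */
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num);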
@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
 			unmap_data.count = segs_to_unmap;
 			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 
-			put_free_pages(ring, pages, segs_to_unmap);
+			gnttab_page_cache_put(&ring->free_pages, pages,
+					      segs_to_unmap);
 			segs_to_unmap = 0;
 		}
 
@@ -371,15 +311,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 			unmap_data.count = segs_to_unmap;
 			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
-			put_free_pages(ring, pages, segs_to_unmap);
+			gnttab_page_cache_put(&ring->free_pages, pages,
+					      segs_to_unmap);
 			segs_to_unmap = 0;
 		}
 		kfree(persistent_gnt);
 	}
 	if (segs_to_unmap > 0) {
 		unmap_data.count = segs_to_unmap;
 		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
-		put_free_pages(ring, pages, segs_to_unmap);
+		gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
 	}
 }
 
@@ -664,9 +605,10 @@ int xen_blkif_schedule(void *arg)
 
 		/* Shrink the free pages pool if it is too large. */
 		if (time_before(jiffies, blkif->buffer_squeeze_end))
-			shrink_free_pagepool(ring, 0);
+			gnttab_page_cache_shrink(&ring->free_pages, 0);
 		else
-			shrink_free_pagepool(ring, max_buffer_pages);
+			gnttab_page_cache_shrink(&ring->free_pages,
+						 max_buffer_pages);
 
 		if (log_stats && time_after(jiffies, ring->st_print))
 			print_stats(ring);
@@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
 	ring->persistent_gnt_c = 0;
 
 	/* Since we are shutting down remove all pages from the buffer */
-	shrink_free_pagepool(ring, 0 /* All */);
+	gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
 }
 
 static unsigned int xen_blkbk_unmap_prepare(
@@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
 	   but is this the best way to deal with this? */
 	BUG_ON(result);
 
-	put_free_pages(ring, data->pages, data->count);
+	gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
 	make_response(ring, pending_req->id,
 		      pending_req->operation, pending_req->status);
 	free_req(ring, pending_req);
@@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
 		if (invcount) {
 			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
 			BUG_ON(ret);
-			put_free_pages(ring, unmap_pages, invcount);
+			gnttab_page_cache_put(&ring->free_pages, unmap_pages,
+					      invcount);
 		}
 		pages += batch;
 		num -= batch;
@@ -850,7 +793,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
 			pages[i]->page = persistent_gnt->page;
 			pages[i]->persistent_gnt = persistent_gnt;
 		} else {
-			if (get_free_page(ring, &pages[i]->page))
+			if (gnttab_page_cache_get(&ring->free_pages,
+						  &pages[i]->page))
 				goto out_of_memory;
 			addr = vaddr(pages[i]->page);
 			pages_to_gnt[segs_to_map] = pages[i]->page;
@@ -883,7 +827,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
 			BUG_ON(new_map_idx >= segs_to_map);
 			if (unlikely(map[new_map_idx].status != 0)) {
 				pr_debug("invalid buffer -- could not remap it\n");
-				put_free_pages(ring, &pages[seg_idx]->page, 1);
+				gnttab_page_cache_put(&ring->free_pages,
+						      &pages[seg_idx]->page, 1);
 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 				ret |= 1;
 				goto next;
@@ -944,7 +889,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
 
 out_of_memory:
 	pr_alert("%s: out of memory\n", __func__);
-	put_free_pages(ring, pages_to_gnt, segs_to_map);
+	gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
 	for (i = last_map; i < num; i++)
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 	return -ENOMEM;
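For orientation, here is a minimal usage sketch of the pattern this diff converts blkback to. The my_* helpers are hypothetical names used only to show where each cache call sits in a ring's lifecycle; the setup-path call to gnttab_page_cache_init() is assumed, since this excerpt does not include the xenbus/common.h changes.

#include <xen/grant_table.h>

static void my_ring_setup(struct xen_blkif_ring *ring)
{
	/* Replaces initialising the old free_pages list, lock and counter. */
	gnttab_page_cache_init(&ring->free_pages);
}

static int my_map_one(struct xen_blkif_ring *ring, struct page **page)
{
	/* Reuses a cached page, or allocates a fresh grant page when empty. */
	return gnttab_page_cache_get(&ring->free_pages, page);
}

static void my_unmap_done(struct xen_blkif_ring *ring, struct page **pages,
			  unsigned int num)
{
	/* Recycle unmapped pages into the cache rather than freeing them. */
	gnttab_page_cache_put(&ring->free_pages, pages, num);
}

static void my_ring_idle(struct xen_blkif_ring *ring)
{
	/* Cap the cache size, as xen_blkif_schedule() does above. */
	gnttab_page_cache_shrink(&ring->free_pages, max_buffer_pages);
}

static void my_ring_teardown(struct xen_blkif_ring *ring)
{
	/* Drop everything on shutdown, as xen_blkbk_free_caches() does above. */
	gnttab_page_cache_shrink(&ring->free_pages, 0);
}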