@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
 mempool_t drbd_request_mempool;
 mempool_t drbd_ee_mempool;
 mempool_t drbd_md_io_page_pool;
+mempool_t drbd_buffer_page_pool;
 struct bio_set drbd_md_io_bio_set;
 struct bio_set drbd_io_bio_set;
 
-/* I do not use a standard mempool, because:
-   1) I want to hand out the pre-allocated objects first.
-   2) I want to be able to interrupt sleeping allocation with a signal.
-   Note: This is a single linked list, the next pointer is the private
-	 member of struct page.
- */
-struct page *drbd_pp_pool;
-DEFINE_SPINLOCK(drbd_pp_lock);
-int drbd_pp_vacant;
-wait_queue_head_t drbd_pp_wait;
-
 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
 
 static const struct block_device_operations drbd_ops = {
@@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
 static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
			    struct drbd_peer_request *peer_req)
 {
+	bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
 	struct page *page = peer_req->pages;
 	unsigned len = peer_req->i.size;
 	int err;
@@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
 	page_chain_for_each(page) {
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
 
-		err = _drbd_send_page(peer_device, page, 0, l,
-				      page_chain_next(page) ? MSG_MORE : 0);
+		if (likely(use_sendpage))
+			err = _drbd_send_page(peer_device, page, 0, l,
+					      page_chain_next(page) ? MSG_MORE : 0);
+		else
+			err = _drbd_no_send_page(peer_device, page, 0, l,
+						 page_chain_next(page) ? MSG_MORE : 0);
+
 		if (err)
 			return err;
 		len -= l;
@@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
 	INIT_LIST_HEAD(&device->sync_ee);
 	INIT_LIST_HEAD(&device->done_ee);
 	INIT_LIST_HEAD(&device->read_ee);
-	INIT_LIST_HEAD(&device->net_ee);
 	INIT_LIST_HEAD(&device->resync_reads);
 	INIT_LIST_HEAD(&device->resync_work.list);
 	INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
 	D_ASSERT(device, list_empty(&device->sync_ee));
 	D_ASSERT(device, list_empty(&device->done_ee));
 	D_ASSERT(device, list_empty(&device->read_ee));
-	D_ASSERT(device, list_empty(&device->net_ee));
 	D_ASSERT(device, list_empty(&device->resync_reads));
 	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
 	D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
 
 static void drbd_destroy_mempools(void)
 {
-	struct page *page;
-
-	while (drbd_pp_pool) {
-		page = drbd_pp_pool;
-		drbd_pp_pool = (struct page *)page_private(page);
-		__free_page(page);
-		drbd_pp_vacant--;
-	}
-
 	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
 
 	bioset_exit(&drbd_io_bio_set);
 	bioset_exit(&drbd_md_io_bio_set);
+	mempool_exit(&drbd_buffer_page_pool);
 	mempool_exit(&drbd_md_io_page_pool);
 	mempool_exit(&drbd_ee_mempool);
 	mempool_exit(&drbd_request_mempool);
@@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
 
 static int drbd_create_mempools(void)
 {
-	struct page *page;
 	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
-	int i, ret;
+	int ret;
 
 	/* caches */
 	drbd_request_cache = kmem_cache_create(
@@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
 	if (ret)
 		goto Enomem;
 
+	ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
+	if (ret)
+		goto Enomem;
+
 	ret = mempool_init_slab_pool(&drbd_request_mempool, number,
 				     drbd_request_cache);
 	if (ret)
@@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
 	if (ret)
 		goto Enomem;
 
-	for (i = 0; i < number; i++) {
-		page = alloc_page(GFP_HIGHUSER);
-		if (!page)
-			goto Enomem;
-		set_page_private(page, (unsigned long)drbd_pp_pool);
-		drbd_pp_pool = page;
-	}
-	drbd_pp_vacant = number;
-
 	return 0;
 
 Enomem:
@@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
 	rr = drbd_free_peer_reqs(device, &device->done_ee);
 	if (rr)
 		drbd_err(device, "%d EEs in done list found!\n", rr);
-
-	rr = drbd_free_peer_reqs(device, &device->net_ee);
-	if (rr)
-		drbd_err(device, "%d EEs in net list found!\n", rr);
 }
 
 /* caution. no locking. */
@@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
 		return err;
 	}
 
-	/*
-	 * allocate all necessary structs
-	 */
-	init_waitqueue_head(&drbd_pp_wait);
-
 	drbd_proc = NULL; /* play safe for drbd_cleanup */
 	idr_init(&drbd_devices);
 
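A note on the new allocation scheme (not part of the patch; the pool name,
reserve size, and helper functions below are illustrative only). The patch
replaces DRBD's hand-rolled page list with the stock <linux/mempool.h>
interface, and the _drbd_no_send_page() copy path added above presumably
exists because a page flagged EE_RELEASE_TO_MEMPOOL is recycled through the
pool and so must not still be referenced by the network stack after a
zero-copy _drbd_send_page(). A minimal sketch of how such a page mempool
is used:

#include <linux/mempool.h>
#include <linux/mm.h>

static mempool_t example_page_pool;	/* hypothetical, stands in for drbd_buffer_page_pool */

static int example_init(void)
{
	/* pre-allocate 32 order-0 pages as an emergency reserve */
	return mempool_init_page_pool(&example_page_pool, 32, 0);
}

static void example_io(void)
{
	/*
	 * With a sleeping gfp mask such as GFP_NOIO, mempool_alloc()
	 * dips into the reserve when the page allocator fails and
	 * otherwise waits for an element to be returned, so forward
	 * progress is guaranteed under memory pressure.
	 */
	struct page *page = mempool_alloc(&example_page_pool, GFP_NOIO);

	/* ... fill the page and hand it to the I/O path ... */

	mempool_free(page, &example_page_pool);	/* tops up the reserve before freeing */
}

static void example_exit(void)
{
	mempool_exit(&example_page_pool);
}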