@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
 mempool_t drbd_request_mempool;
 mempool_t drbd_ee_mempool;
 mempool_t drbd_md_io_page_pool;
+mempool_t drbd_buffer_page_pool;
 struct bio_set drbd_md_io_bio_set;
 struct bio_set drbd_io_bio_set;
 
-/* I do not use a standard mempool, because:
-   1) I want to hand out the pre-allocated objects first.
-   2) I want to be able to interrupt sleeping allocation with a signal.
-   Note: This is a single linked list, the next pointer is the private
-	 member of struct page.
- */
-struct page *drbd_pp_pool;
-DEFINE_SPINLOCK(drbd_pp_lock);
-int drbd_pp_vacant;
-wait_queue_head_t drbd_pp_wait;
-
 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
 
 static const struct block_device_operations drbd_ops = {
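Note on the hunk above: the hand-rolled page list (drbd_pp_pool guarded by drbd_pp_lock, with drbd_pp_vacant and drbd_pp_wait) is replaced by a standard mempool_t, drbd_buffer_page_pool. The removed comment's two reasons for avoiding a mempool are apparently traded away: a mempool hands out its pre-allocated elements only as a fallback when the normal allocator fails, and mempool_alloc() sleeps uninterruptibly rather than being signal-interruptible. A minimal sketch of the page-pool mempool API in isolation, with hypothetical demo_* names that are not part of DRBD:

#include <linux/mempool.h>
#include <linux/gfp.h>

static mempool_t demo_page_pool;	/* hypothetical pool, not part of DRBD */

static int demo_pool_init(unsigned int min_nr)
{
	/* Reserve min_nr order-0 pages up front; normal allocations still go
	 * through alloc_pages() and the reserve is used only when that fails. */
	return mempool_init_page_pool(&demo_page_pool, min_nr, 0);
}

static struct page *demo_get_page(void)
{
	/* With __GFP_DIRECT_RECLAIM set (as in GFP_NOIO) this never fails;
	 * it sleeps, uninterruptibly, until a page is returned to the pool. */
	return mempool_alloc(&demo_page_pool, GFP_NOIO);
}

static void demo_put_page(struct page *page)
{
	mempool_free(page, &demo_page_pool);
}

static void demo_pool_exit(void)
{
	mempool_exit(&demo_page_pool);
}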
@@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
 static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
 			    struct drbd_peer_request *peer_req)
 {
+	bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
 	struct page *page = peer_req->pages;
 	unsigned len = peer_req->i.size;
 	int err;
@@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
 	page_chain_for_each(page) {
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
 
-		err = _drbd_send_page(peer_device, page, 0, l,
-				      page_chain_next(page) ? MSG_MORE : 0);
+		if (likely(use_sendpage))
+			err = _drbd_send_page(peer_device, page, 0, l,
+					      page_chain_next(page) ? MSG_MORE : 0);
+		else
+			err = _drbd_no_send_page(peer_device, page, 0, l,
+						 page_chain_next(page) ? MSG_MORE : 0);
+
 		if (err)
 			return err;
 		len -= l;
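Note on the two hunks above: peer requests whose pages will be returned to drbd_buffer_page_pool are marked EE_RELEASE_TO_MEMPOOL (a flag introduced elsewhere in this series) and are sent through the copying path rather than the zero-copy sendpage path. The likely reason is that a zero-copy send lets the network stack keep references to the page after the call returns, which would conflict with handing the page straight back to a mempool; copying the data into the socket's own buffers makes the page reusable immediately. That also appears to be why the net_ee list, which used to park peer requests until the network was done with their pages, is removed in the hunks below. A self-contained sketch of what a copying send generally looks like with kernel socket APIs; illustration only, not DRBD's actual _drbd_no_send_page():

#include <linux/net.h>
#include <linux/highmem.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* demo_* is hypothetical: kernel_sendmsg() copies the payload into the
 * socket's own buffers, so the page may be reused or returned to a mempool
 * as soon as this returns. */
static int demo_send_page_by_copy(struct socket *sock, struct page *page,
				  int offset, size_t size, unsigned int msg_flags)
{
	struct msghdr msg = { .msg_flags = msg_flags | MSG_NOSIGNAL };
	struct kvec iov;
	void *addr;
	int sent;

	addr = kmap_local_page(page);
	iov.iov_base = addr + offset;
	iov.iov_len = size;
	sent = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap_local(addr);

	return sent < 0 ? sent : 0;
}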
@@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
 	INIT_LIST_HEAD(&device->sync_ee);
 	INIT_LIST_HEAD(&device->done_ee);
 	INIT_LIST_HEAD(&device->read_ee);
-	INIT_LIST_HEAD(&device->net_ee);
 	INIT_LIST_HEAD(&device->resync_reads);
 	INIT_LIST_HEAD(&device->resync_work.list);
 	INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
 	D_ASSERT(device, list_empty(&device->sync_ee));
 	D_ASSERT(device, list_empty(&device->done_ee));
 	D_ASSERT(device, list_empty(&device->read_ee));
-	D_ASSERT(device, list_empty(&device->net_ee));
 	D_ASSERT(device, list_empty(&device->resync_reads));
 	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
 	D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
 
 static void drbd_destroy_mempools(void)
 {
-	struct page *page;
-
-	while (drbd_pp_pool) {
-		page = drbd_pp_pool;
-		drbd_pp_pool = (struct page *)page_private(page);
-		__free_page(page);
-		drbd_pp_vacant--;
-	}
-
 	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
 
 	bioset_exit(&drbd_io_bio_set);
 	bioset_exit(&drbd_md_io_bio_set);
+	mempool_exit(&drbd_buffer_page_pool);
 	mempool_exit(&drbd_md_io_page_pool);
 	mempool_exit(&drbd_ee_mempool);
 	mempool_exit(&drbd_request_mempool);
@@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
 
 static int drbd_create_mempools(void)
 {
-	struct page *page;
 	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
-	int i, ret;
+	int ret;
 
 	/* caches */
 	drbd_request_cache = kmem_cache_create(
@@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
 	if (ret)
 		goto Enomem;
 
+	ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
+	if (ret)
+		goto Enomem;
+
 	ret = mempool_init_slab_pool(&drbd_request_mempool, number,
 				     drbd_request_cache);
 	if (ret)
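Note on the hunk above: mempool_init_page_pool() reserves number order-0 pages up front, where number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count, i.e. roughly one maximally sized request's worth of pages per configured minor; this replaces the explicit alloc_page() pre-allocation loop removed further down. A hypothetical sketch of how a caller could draw a chain of pages from the new pool, reusing the "next pointer lives in page_private" convention of the removed drbd_pp_pool list, and assuming drbd_buffer_page_pool is declared extern in drbd_int.h by the companion change; the real allocation path is outside this file and may differ:

#include <linux/mempool.h>
#include <linux/mm.h>

/* demo_* is hypothetical and not part of this patch. */
static struct page *demo_alloc_page_chain(unsigned int nr_pages, gfp_t gfp_mask)
{
	struct page *chain = NULL;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(&drbd_buffer_page_pool, gfp_mask);
		if (!page)
			goto fail;
		/* chain via page_private, like the old drbd_pp_pool did */
		set_page_private(page, (unsigned long)chain);
		chain = page;
	}
	return chain;

fail:
	while (chain) {
		page = chain;
		chain = (struct page *)page_private(page);
		mempool_free(page, &drbd_buffer_page_pool);
	}
	return NULL;
}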
@@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
 	if (ret)
 		goto Enomem;
 
-	for (i = 0; i < number; i++) {
-		page = alloc_page(GFP_HIGHUSER);
-		if (!page)
-			goto Enomem;
-		set_page_private(page, (unsigned long)drbd_pp_pool);
-		drbd_pp_pool = page;
-	}
-	drbd_pp_vacant = number;
-
 	return 0;
 
 Enomem:
@@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
 	rr = drbd_free_peer_reqs(device, &device->done_ee);
 	if (rr)
 		drbd_err(device, "%d EEs in done list found!\n", rr);
-
-	rr = drbd_free_peer_reqs(device, &device->net_ee);
-	if (rr)
-		drbd_err(device, "%d EEs in net list found!\n", rr);
 }
 
 /* caution. no locking. */
@@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
 		return err;
 	}
 
-	/*
-	 * allocate all necessary structs
-	 */
-	init_waitqueue_head(&drbd_pp_wait);
-
 	drbd_proc = NULL; /* play safe for drbd_cleanup */
 	idr_init(&drbd_devices);
 