 #include "core.h"
 #include "card.h"
 
-#define MMC_QUEUE_BOUNCESZ	65536
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
-	if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
-		return 0;
-
-	if (bouncesz > host->max_req_size)
-		bouncesz = host->max_req_size;
-	if (bouncesz > host->max_seg_size)
-		bouncesz = host->max_seg_size;
-	if (bouncesz > host->max_blk_count * 512)
-		bouncesz = host->max_blk_count * 512;
-
-	if (bouncesz <= 512)
-		return 0;
-
-	return bouncesz;
-}
-
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
  * @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 
-	if (card->bouncesz) {
-		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
-		if (!mq_rq->bounce_buf)
-			return -ENOMEM;
-		if (card->bouncesz > 512) {
-			mq_rq->sg = mmc_alloc_sg(1, gfp);
-			if (!mq_rq->sg)
-				return -ENOMEM;
-			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
-							gfp);
-			if (!mq_rq->bounce_sg)
-				return -ENOMEM;
-		}
-	} else {
-		mq_rq->bounce_buf = NULL;
-		mq_rq->bounce_sg = NULL;
-		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
-		if (!mq_rq->sg)
-			return -ENOMEM;
-	}
+	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+	if (!mq_rq->sg)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-	/* It is OK to kfree(NULL) so this will be smooth */
-	kfree(mq_rq->bounce_sg);
-	mq_rq->bounce_sg = NULL;
-
-	kfree(mq_rq->bounce_buf);
-	mq_rq->bounce_buf = NULL;
-
 	kfree(mq_rq->sg);
 	mq_rq->sg = NULL;
 }
@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
-	/*
-	 * mmc_init_request() depends on card->bouncesz so it must be calculated
-	 * before blk_init_allocated_queue() starts allocating requests.
-	 */
-	card->bouncesz = mmc_queue_calc_bouncesz(host);
-
 	mq->card = card;
 	mq->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!mq->queue)
@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-	if (card->bouncesz) {
-		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segment_size(mq->queue, card->bouncesz);
-	} else {
-		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_hw_sectors(mq->queue,
-			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_segments(mq->queue, host->max_segs);
-		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-	}
+	blk_queue_bounce_limit(mq->queue, limit);
+	blk_queue_max_hw_sectors(mq->queue,
+		min(host->max_blk_count, host->max_req_size / 512));
+	blk_queue_max_segments(mq->queue, host->max_segs);
+	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 	sema_init(&mq->thread_sem, 1);
 
@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
  */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
-	unsigned int sg_len;
-	size_t buflen;
-	struct scatterlist *sg;
 	struct request *req = mmc_queue_req_to_req(mqrq);
-	int i;
-
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
-	sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
-	mqrq->bounce_sg_len = sg_len;
-
-	buflen = 0;
-	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-		buflen += sg->length;
-
-	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-	return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
-		return;
-
-	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-		mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
-		return;
 
-	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-		mqrq->bounce_buf, mqrq->sg[0].length);
+	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
 }
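
With the bounce-buffer special case removed, the queue limits applied in mmc_init_queue() above come straight from the host controller's advertised capabilities. For reference, a minimal, hypothetical probe fragment is sketched below to show where a host driver would normally fill in those fields before mmc_add_host(); the foo_* names and the numeric limits are illustrative assumptions, not part of this commit.

#include <linux/errno.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>

/* Hypothetical host driver: only the capability setup is shown. */
static int foo_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;

	/* No private driver state in this sketch, hence extra size 0. */
	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	/*
	 * Scatter-gather ability of the controller's DMA engine; with the
	 * bounce buffer gone these now feed blk_queue_max_segments() and
	 * blk_queue_max_segment_size() directly.
	 */
	mmc->max_segs = 64;
	mmc->max_seg_size = 65536;

	/*
	 * Largest single transfer; max_blk_count and max_req_size bound
	 * blk_queue_max_hw_sectors() in mmc_init_queue().
	 */
	mmc->max_blk_size = 512;
	mmc->max_blk_count = 65535;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	return mmc_add_host(mmc);
}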