@@ -267,27 +267,9 @@ struct blk_mq_queue_data {
 	bool last;
 };
 
-typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
-		const struct blk_mq_queue_data *);
-typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
-typedef bool (get_budget_fn)(struct request_queue *);
-typedef void (put_budget_fn)(struct request_queue *);
-typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
-typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
-typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
-		unsigned int, unsigned int);
-typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
-		unsigned int);
-
 typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 		bool);
 typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
-typedef int (poll_fn)(struct blk_mq_hw_ctx *);
-typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
-typedef bool (busy_fn)(struct request_queue *);
-typedef void (complete_fn)(struct request *);
-typedef void (cleanup_rq_fn)(struct request *);
 
 /**
  * struct blk_mq_ops - Callback functions that implements block driver
@@ -297,7 +279,8 @@ struct blk_mq_ops {
 	/**
 	 * @queue_rq: Queue a new request from block IO.
 	 */
-	queue_rq_fn		*queue_rq;
+	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
+				 const struct blk_mq_queue_data *);
 
 	/**
 	 * @commit_rqs: If a driver uses bd->last to judge when to submit
@@ -306,45 +289,46 @@ struct blk_mq_ops {
 	 * purpose of kicking the hardware (which the last request otherwise
 	 * would have done).
 	 */
-	commit_rqs_fn		*commit_rqs;
+	void (*commit_rqs)(struct blk_mq_hw_ctx *);
 
 	/**
 	 * @get_budget: Reserve budget before queue request, once .queue_rq is
 	 * run, it is driver's responsibility to release the
 	 * reserved budget. Also we have to handle failure case
 	 * of .get_budget for avoiding I/O deadlock.
 	 */
-	get_budget_fn		*get_budget;
+	bool (*get_budget)(struct request_queue *);
+
 	/**
 	 * @put_budget: Release the reserved budget.
 	 */
-	put_budget_fn		*put_budget;
+	void (*put_budget)(struct request_queue *);
 
 	/**
 	 * @timeout: Called on request timeout.
 	 */
-	timeout_fn		*timeout;
+	enum blk_eh_timer_return (*timeout)(struct request *, bool);
 
 	/**
 	 * @poll: Called to poll for completion of a specific tag.
 	 */
-	poll_fn			*poll;
+	int (*poll)(struct blk_mq_hw_ctx *);
 
 	/**
 	 * @complete: Mark the request as complete.
 	 */
-	complete_fn		*complete;
+	void (*complete)(struct request *);
 
 	/**
 	 * @init_hctx: Called when the block layer side of a hardware queue has
 	 * been set up, allowing the driver to allocate/init matching
 	 * structures.
 	 */
-	init_hctx_fn		*init_hctx;
+	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
 	/**
 	 * @exit_hctx: Ditto for exit/teardown.
 	 */
-	exit_hctx_fn		*exit_hctx;
+	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
 
 	/**
 	 * @init_request: Called for every command allocated by the block layer
@@ -353,11 +337,13 @@ struct blk_mq_ops {
 	 * Tag greater than or equal to queue_depth is for setting up
 	 * flush request.
 	 */
-	init_request_fn		*init_request;
+	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
+			    unsigned int, unsigned int);
 	/**
 	 * @exit_request: Ditto for exit/teardown.
 	 */
-	exit_request_fn		*exit_request;
+	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
+			     unsigned int);
 
 	/**
 	 * @initialize_rq_fn: Called from inside blk_get_request().
@@ -368,18 +354,18 @@ struct blk_mq_ops {
 	 * @cleanup_rq: Called before freeing one request which isn't completed
 	 * yet, and usually for freeing the driver private data.
 	 */
-	cleanup_rq_fn		*cleanup_rq;
+	void (*cleanup_rq)(struct request *);
 
 	/**
 	 * @busy: If set, returns whether or not this queue currently is busy.
 	 */
-	busy_fn			*busy;
+	bool (*busy)(struct request_queue *);
 
 	/**
 	 * @map_queues: This allows drivers specify their own queue mapping by
 	 * overriding the setup-time function that builds the mq_map.
 	 */
-	map_queues_fn		*map_queues;
+	int (*map_queues)(struct blk_mq_tag_set *set);
 
 #ifdef CONFIG_BLK_DEBUG_FS
 	/**
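
For reference, after this change a driver fills in struct blk_mq_ops against the inline function-pointer prototypes shown above instead of the removed *_fn typedefs. The fragment below is a minimal, hypothetical sketch and is not part of the commit: my_queue_rq, my_timeout, and my_mq_ops are invented names, and the callback bodies only illustrate the signatures.

/* Hypothetical driver fragment; only <linux/blk-mq.h> is assumed. */
#include <linux/blk-mq.h>

/* Matches: blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
 *                                   const struct blk_mq_queue_data *); */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* ... hand the request to the hardware here ... */
	blk_mq_end_request(rq, BLK_STS_OK);	/* complete synchronously */
	return BLK_STS_OK;
}

/* Matches: enum blk_eh_timer_return (*timeout)(struct request *, bool); */
static enum blk_eh_timer_return my_timeout(struct request *rq, bool reserved)
{
	return BLK_EH_DONE;	/* report the timeout as handled */
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,	/* checked against the inline prototype */
	.timeout	= my_timeout,
};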