@@ -84,16 +84,6 @@ static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
 	return transfer_len <= hpb->pre_req_max_tr_len;
 }
 
-/*
- * In this driver, WRITE_BUFFER CMD support 36KB (len=9) ~ 1MB (len=256) as
- * default. It is possible to change range of transfer_len through sysfs.
- */
-static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
-{
-	return len > hpb->pre_req_min_tr_len &&
-	       len <= hpb->pre_req_max_tr_len;
-}
-
 static bool ufshpb_is_general_lun(int lun)
 {
 	return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
@@ -334,7 +324,7 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
 
 static void
 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
-			    __be64 ppn, u8 transfer_len, int read_id)
+			    __be64 ppn, u8 transfer_len)
 {
 	unsigned char *cdb = lrbp->cmd->cmnd;
 	__be64 ppn_tmp = ppn;
@@ -346,256 +336,11 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
 	/* ppn value is stored as big-endian in the host memory */
 	memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
 	cdb[14] = transfer_len;
-	cdb[15] = read_id;
+	cdb[15] = 0;
 
 	lrbp->cmd->cmd_len = UFS_CDB_SIZE;
 }
 
-static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
-					    unsigned long lpn, unsigned int len,
-					    int read_id)
-{
-	cdb[0] = UFSHPB_WRITE_BUFFER;
-	cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
-
-	put_unaligned_be32(lpn, &cdb[2]);
-	cdb[6] = read_id;
-	put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
-
-	cdb[9] = 0x00;	/* Control = 0x00 */
-}
-
-static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
-{
-	struct ufshpb_req *pre_req;
-
-	if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
-		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
-			 "pre_req throttle. inflight %d throttle %d",
-			 hpb->num_inflight_pre_req, hpb->throttle_pre_req);
-		return NULL;
-	}
-
-	pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
-					   struct ufshpb_req, list_req);
-	if (!pre_req) {
-		dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
-		return NULL;
-	}
-
-	list_del_init(&pre_req->list_req);
-	hpb->num_inflight_pre_req++;
-
-	return pre_req;
-}
-
-static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
-				      struct ufshpb_req *pre_req)
-{
-	pre_req->req = NULL;
-	bio_reset(pre_req->bio);
-	list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
-	hpb->num_inflight_pre_req--;
-}
-
-static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
-{
-	struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
-	struct ufshpb_lu *hpb = pre_req->hpb;
-	unsigned long flags;
-
-	if (error) {
-		struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-		struct scsi_sense_hdr sshdr;
-
-		dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
-		scsi_command_normalize_sense(cmd, &sshdr);
-		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-			"code %x sense_key %x asc %x ascq %x",
-			sshdr.response_code,
-			sshdr.sense_key, sshdr.asc, sshdr.ascq);
-		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-			"byte4 %x byte5 %x byte6 %x additional_len %x",
-			sshdr.byte4, sshdr.byte5,
-			sshdr.byte6, sshdr.additional_length);
-	}
-
-	blk_mq_free_request(req);
-	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-	ufshpb_put_pre_req(pre_req->hpb, pre_req);
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-}
-
-static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
-{
-	struct ufshpb_lu *hpb = pre_req->hpb;
-	struct ufshpb_region *rgn;
-	struct ufshpb_subregion *srgn;
-	__be64 *addr;
-	int offset = 0;
-	int copied;
-	unsigned long lpn = pre_req->wb.lpn;
-	int rgn_idx, srgn_idx, srgn_offset;
-	unsigned long flags;
-
-	addr = page_address(page);
-	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
-
-	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
-next_offset:
-	rgn = hpb->rgn_tbl + rgn_idx;
-	srgn = rgn->srgn_tbl + srgn_idx;
-
-	if (!ufshpb_is_valid_srgn(rgn, srgn))
-		goto mctx_error;
-
-	if (!srgn->mctx)
-		goto mctx_error;
-
-	copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
-					   pre_req->wb.len - offset,
-					   &addr[offset]);
-
-	if (copied < 0)
-		goto mctx_error;
-
-	offset += copied;
-	srgn_offset += copied;
-
-	if (srgn_offset == hpb->entries_per_srgn) {
-		srgn_offset = 0;
-
-		if (++srgn_idx == hpb->srgns_per_rgn) {
-			srgn_idx = 0;
-			rgn_idx++;
-		}
-	}
-
-	if (offset < pre_req->wb.len)
-		goto next_offset;
-
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-	return 0;
-mctx_error:
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-	return -ENOMEM;
-}
-
-static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
-				       struct request_queue *q,
-				       struct ufshpb_req *pre_req)
-{
-	struct page *page = pre_req->wb.m_page;
-	struct bio *bio = pre_req->bio;
-	int entries_bytes, ret;
-
-	if (!page)
-		return -ENOMEM;
-
-	if (ufshpb_prep_entry(pre_req, page))
-		return -ENOMEM;
-
-	entries_bytes = pre_req->wb.len * sizeof(__be64);
-
-	ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
-	if (ret != entries_bytes) {
-		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-			"bio_add_pc_page fail: %d", ret);
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
-{
-	if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
-		hpb->cur_read_id = 1;
-	return hpb->cur_read_id;
-}
-
-static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
-				  struct ufshpb_req *pre_req, int read_id)
-{
-	struct scsi_device *sdev = cmd->device;
-	struct request_queue *q = sdev->request_queue;
-	struct request *req;
-	struct scsi_request *rq;
-	struct bio *bio = pre_req->bio;
-
-	pre_req->hpb = hpb;
-	pre_req->wb.lpn = sectors_to_logical(cmd->device,
-					     blk_rq_pos(scsi_cmd_to_rq(cmd)));
-	pre_req->wb.len = sectors_to_logical(cmd->device,
-					     blk_rq_sectors(scsi_cmd_to_rq(cmd)));
-	if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
-		return -ENOMEM;
-
-	req = pre_req->req;
-
-	/* 1. request setup */
-	blk_rq_append_bio(req, bio);
-	req->rq_disk = NULL;
-	req->end_io_data = (void *)pre_req;
-	req->end_io = ufshpb_pre_req_compl_fn;
-
-	/* 2. scsi_request setup */
-	rq = scsi_req(req);
-	rq->retries = 1;
-
-	ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
-				 read_id);
-	rq->cmd_len = scsi_command_size(rq->cmd);
-
-	if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
-		return -EAGAIN;
-
-	hpb->stats.pre_req_cnt++;
-
-	return 0;
-}
-
-static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
-				int *read_id)
-{
-	struct ufshpb_req *pre_req;
-	struct request *req = NULL;
-	unsigned long flags;
-	int _read_id;
-	int ret = 0;
-
-	req = blk_get_request(cmd->device->request_queue,
-			      REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
-	if (IS_ERR(req))
-		return -EAGAIN;
-
-	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-	pre_req = ufshpb_get_pre_req(hpb);
-	if (!pre_req) {
-		ret = -EAGAIN;
-		goto unlock_out;
-	}
-	_read_id = ufshpb_get_read_id(hpb);
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
-	pre_req->req = req;
-
-	ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
-	if (ret)
-		goto free_pre_req;
-
-	*read_id = _read_id;
-
-	return ret;
-free_pre_req:
-	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-	ufshpb_put_pre_req(hpb, pre_req);
-unlock_out:
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-	blk_put_request(req);
-	return ret;
-}
-
 /*
  * This function will set up HPB read command using host-side L2P map data.
  */
@@ -609,7 +354,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	__be64 ppn;
 	unsigned long flags;
 	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
-	int read_id = 0;
 	int err = 0;
 
 	hpb = ufshpb_get_hpb_data(cmd->device);
@@ -685,24 +429,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 		dev_err(hba->dev, "get ppn failed. err %d\n", err);
 		return err;
 	}
-	if (!ufshpb_is_legacy(hba) &&
-	    ufshpb_is_required_wb(hpb, transfer_len)) {
-		err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
-		if (err) {
-			unsigned long timeout;
-
-			timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
-				  hpb->params.requeue_timeout_ms);
-
-			if (time_before(jiffies, timeout))
-				return -EAGAIN;
-
-			hpb->stats.miss_cnt++;
-			return 0;
-		}
-	}
 
-	ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
+	ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
 
 	hpb->stats.hit_cnt++;
 	return 0;
@@ -1841,16 +1569,11 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
 	u32 entries_per_rgn;
 	u64 rgn_mem_size, tmp;
 
-	/* for pre_req */
-	hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
-
 	if (ufshpb_is_legacy(hba))
 		hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
 	else
 		hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
 
-	hpb->cur_read_id = 0;
-
 	hpb->lu_pinned_start = hpb_lu_info->pinned_start;
 	hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
 		(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
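
Note on the resulting HPB READ setup: with the pre-request (WRITE BUFFER) path removed, ufshpb_prep() always issues a single HPB READ, and CDB byte 15 (formerly read_id) is simply zeroed. Below is a minimal, self-contained sketch of that CDB packing for readers skimming the diff; the byte offsets (PPN at bytes 6..13, transfer length at byte 14, byte 15 zero) come from the hunks above, while the demo_ helper name, the opcode constant, and the sample values are illustrative assumptions rather than driver code.

/*
 * Illustrative sketch only, not driver code: pack a 16-byte HPB READ CDB
 * the way the remaining ufshpb_set_hpb_read_to_upiu() path does after this
 * commit. Compile with any C compiler and run to dump the resulting bytes.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed opcode value for illustration; the driver has its own define. */
#define DEMO_UFSHPB_READ_OPCODE 0xF8

static void demo_pack_hpb_read_cdb(uint8_t cdb[16], uint64_t ppn_be, uint8_t transfer_len)
{
	cdb[0] = DEMO_UFSHPB_READ_OPCODE;
	/* The PPN is already big-endian in host memory, so it is copied as-is. */
	memcpy(&cdb[6], &ppn_be, sizeof(ppn_be));
	cdb[14] = transfer_len;
	cdb[15] = 0;	/* previously carried read_id; now always zero */
}

int main(void)
{
	uint8_t cdb[16] = { 0 };
	int i;

	demo_pack_hpb_read_cdb(cdb, 0x1122334455667788ULL, 8);
	for (i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}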