@@ -138,12 +138,6 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
 
 	ENV_BUG_ON(!(collision_index < collision_table_entries));
 
-	if (env_atomic_read(&part_alru->size) == 0) {
-		ocf_cache_log(cache, log_err, "ERROR: Attempt to remove item "
-				"from empty ALRU Cleaning Policy queue!\n");
-		ENV_BUG();
-	}
-
 	alru = &ocf_metadata_get_cleaning_policy(cache, collision_index)
 			->meta.alru;
 	/* Set prev and next (even if non existent) */
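The guard deleted above amounted to asserting that the per-partition ALRU queue is non-empty before unlinking an item. If that invariant were still wanted, it would condense to a single assertion (a sketch, not part of this patch):

    /* Equivalent one-line form of the removed guard (illustrative only). */
    ENV_BUG_ON(env_atomic_read(&part_alru->size) == 0);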
@@ -400,22 +394,22 @@ int cleaning_policy_alru_initialize(ocf_cache_t cache, int kick_cleaner)
 	return 0;
 }
 
-#define OCF_ALRU_POPULATE_SHARDS_CNT 32
+#define OCF_ALRU_FILL_SHARDS_CNT 32
 
-struct ocf_alru_populate_context {
+struct ocf_alru_fill_context {
 	ocf_cache_t cache;
 	struct {
 		struct {
 			ocf_cache_line_t head;
 			ocf_cache_line_t tail;
 		} part[OCF_USER_IO_CLASS_MAX];
-	} shard[OCF_ALRU_POPULATE_SHARDS_CNT] __attribute__((aligned(64)));
+	} shard[OCF_ALRU_FILL_SHARDS_CNT] __attribute__((aligned(64)));
 
-	ocf_cleaning_populate_end_t cmpl;
+	ocf_cleaning_op_end_t cmpl;
 	void *priv;
 };
 
-static void add_alru_head_populate(struct ocf_alru_populate_context *context,
+static void add_alru_head_populate(struct ocf_alru_fill_context *context,
 		unsigned shard_id, ocf_core_id_t part_id,
 		ocf_cache_line_t cline)
 {
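The per-shard head/tail bookkeeping above is 64-byte aligned, presumably so that each shard's slot sits on its own cache line and concurrent shard workers do not invalidate each other's lines (false sharing). A minimal standalone sketch of the same idiom, with illustrative names only:

    #include <stdint.h>

    #define SHARDS_CNT 32

    /* Each shard's accumulator is padded out to a full cache line,
     * so parallel writers never share a line. */
    struct shard_acc {
            uint64_t count;
    } __attribute__((aligned(64)));

    static struct shard_acc acc[SHARDS_CNT];

    static void shard_work(unsigned shard_id, uint64_t items)
    {
            acc[shard_id].count += items; /* touches only this shard's line */
    }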
@@ -457,10 +451,10 @@ static void add_alru_head_populate(struct ocf_alru_populate_context *context,
 static int ocf_alru_populate_handle(ocf_parallelize_t parallelize,
 		void *priv, unsigned shard_id, unsigned shards_cnt)
 {
-	struct ocf_alru_populate_context *context = priv;
+	struct ocf_alru_fill_context *context = priv;
 	ocf_cache_t cache = context->cache;
 	ocf_cache_line_t entries = cache->device->collision_table_entries;
-	ocf_cache_line_t terminator = entries;
+	ocf_cache_line_t terminator = cache->device->collision_table_entries;
 	unsigned part_size[OCF_USER_IO_CLASS_MAX] = {};
 	struct ocf_user_part *user_part;
 	struct alru_cleaning_policy *part_alru;
@@ -506,10 +500,49 @@ static int ocf_alru_populate_handle(ocf_parallelize_t parallelize,
 	return 0;
 }
 
-static void ocf_alru_populate_finish(ocf_parallelize_t parallelize,
+static int ocf_alru_prepopulate_handle(ocf_parallelize_t parallelize,
+		void *priv, unsigned shard_id, unsigned shards_cnt)
+{
+	struct ocf_alru_fill_context *context = priv;
+	ocf_cache_t cache = context->cache;
+	ocf_cache_line_t entries = cache->device->collision_table_entries;
+	ocf_cache_line_t terminator = cache->device->collision_table_entries;
+	unsigned part_size[OCF_USER_IO_CLASS_MAX] = {};
+	struct ocf_user_part *user_part;
+	struct alru_cleaning_policy *part_alru;
+	ocf_part_id_t part_id;
+	ocf_cache_line_t cline, portion;
+	uint32_t begin, end;
+	uint32_t step = 0;
+	int i;
+
+	portion = OCF_DIV_ROUND_UP((uint64_t)entries, shards_cnt);
+	begin = portion * shard_id;
+	end = OCF_MIN((uint64_t)portion * (shard_id + 1), entries);
+
+	for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
+		context->shard[shard_id].part[i].head = terminator;
+		context->shard[shard_id].part[i].tail = terminator;
+	}
+
+	for (cline = begin; cline < end; cline++) {
+		OCF_COND_RESCHED_DEFAULT(step);
+
+		cleaning_policy_alru_init_cache_block(cache, cline);
+	}
+
+	for_each_user_part(cache, user_part, part_id) {
+		part_alru = &user_part->clean_pol->policy.alru;
+		env_atomic_add(part_size[part_id], &part_alru->size);
+	}
+
+	return 0;
+}
+
+static void ocf_alru_fill_finish(ocf_parallelize_t parallelize,
 		void *priv, int error)
 {
-	struct ocf_alru_populate_context *context = priv;
+	struct ocf_alru_fill_context *context = priv;
 	ocf_cache_t cache = context->cache;
 	ocf_part_id_t part_id;
 	ocf_cache_line_t head, tail;
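The fill handlers split the collision table into contiguous per-shard ranges with the same ceil-divide-and-clamp arithmetic. A self-contained restatement of that split (illustrative macros, not the OCF ones):

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            uint64_t entries = 1000003, shards_cnt = 32, shard_id;
            uint64_t portion = DIV_ROUND_UP(entries, shards_cnt);

            for (shard_id = 0; shard_id < shards_cnt; shard_id++) {
                    uint64_t begin = portion * shard_id;
                    uint64_t end = MIN(portion * (shard_id + 1), entries);

                    /* The last shard is clamped, so the ranges cover
                     * [0, entries) exactly once. */
                    printf("shard %2u: [%u, %u)\n", (unsigned)shard_id,
                                    (unsigned)begin, (unsigned)end);
            }
            return 0;
    }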
@@ -519,7 +552,7 @@ static void ocf_alru_populate_finish(ocf_parallelize_t parallelize,
 		goto end;
 
 	for (part_id = 0; part_id < OCF_USER_IO_CLASS_MAX; part_id++) {
-		for (shard = 0; shard < OCF_ALRU_POPULATE_SHARDS_CNT; shard++) {
+		for (shard = 0; shard < OCF_ALRU_FILL_SHARDS_CNT; shard++) {
 			head = context->shard[shard].part[part_id].head;
 			tail = context->shard[shard].part[part_id].tail;
 
@@ -535,19 +568,20 @@ static void ocf_alru_populate_finish(ocf_parallelize_t parallelize,
 	ocf_parallelize_destroy(parallelize);
 }
 
-void cleaning_policy_alru_populate(ocf_cache_t cache,
-		ocf_cleaning_populate_end_t cmpl, void *priv)
+static void cleaning_policy_alru_fill(ocf_cache_t cache,
+		ocf_cleaning_op_end_t cmpl, void *priv,
+		ocf_parallelize_handle_t fill_handle)
 {
-	struct ocf_alru_populate_context *context;
+	struct ocf_alru_fill_context *context;
 	ocf_parallelize_t parallelize;
 	struct alru_cleaning_policy *part_alru;
 	struct ocf_user_part *user_part;
 	ocf_part_id_t part_id;
 	int result;
 
 	result = ocf_parallelize_create(&parallelize, cache,
-			OCF_ALRU_POPULATE_SHARDS_CNT, sizeof(*context),
-			ocf_alru_populate_handle, ocf_alru_populate_finish,
+			OCF_ALRU_FILL_SHARDS_CNT, sizeof(*context),
+			fill_handle, ocf_alru_fill_finish,
 			true);
 	if (result) {
 		cmpl(priv, result);
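The populate/prepopulate pair in the next hunk differs only in which per-shard handler is passed into this common driver, i.e. plain function-pointer dispatch. A generic sketch of the pattern with hypothetical names (the real runner is ocf_parallelize, which executes handlers concurrently):

    typedef int (*shard_handler_t)(void *priv, unsigned shard_id,
                    unsigned shards_cnt);

    /* Serial stand-in for the parallel runner: invoke the chosen
     * handler once per shard. */
    static void run_sharded(shard_handler_t handler, void *priv,
                    unsigned shards_cnt)
    {
            unsigned i;

            for (i = 0; i < shards_cnt; i++)
                    handler(priv, i, shards_cnt);
    }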
@@ -571,6 +605,105 @@ void cleaning_policy_alru_populate(ocf_cache_t cache,
 	ocf_parallelize_run(parallelize);
 }
 
+void cleaning_policy_alru_populate(ocf_cache_t cache,
+		ocf_cleaning_op_end_t cmpl, void *priv)
+{
+	cleaning_policy_alru_fill(cache, cmpl, priv,
+			ocf_alru_populate_handle);
+}
+
+void cleaning_policy_alru_prepopulate(ocf_cache_t cache,
+		ocf_cleaning_op_end_t cmpl, void *priv)
+{
+	cleaning_policy_alru_fill(cache, cmpl, priv,
+			ocf_alru_prepopulate_handle);
+}
+
+struct ocf_alru_update_context {
+	ocf_cache_t cache;
+	ocf_cleaning_op_end_t cmpl;
+	void *priv;
+};
+
+static int ocf_alru_update_handle(ocf_parallelize_t parallelize,
+		void *priv, unsigned shard_id, unsigned shards_cnt)
+{
+	struct ocf_alru_update_context *context = priv;
+	ocf_cache_t cache = context->cache;
+	ocf_cache_line_t entries = cache->device->hash_table_entries;
+	ocf_cache_line_t terminator = cache->device->collision_table_entries;
+	ocf_cache_line_t hash, cline, portion;
+	uint32_t begin, end;
+	unsigned lock_idx = shard_id % OCF_NUM_GLOBAL_META_LOCKS;
+	uint32_t step = 0;
+
+	portion = OCF_DIV_ROUND_UP((uint64_t)entries, shards_cnt);
+	begin = portion * shard_id;
+	end = OCF_MIN((uint64_t)portion * (shard_id + 1), entries);
+
+	ocf_metadata_start_shared_access(&cache->metadata.lock, lock_idx);
+	for (hash = begin; hash < end; hash++) {
+		OCF_COND_RESCHED_DEFAULT(step);
+
+		ocf_hb_id_naked_lock_rd(&cache->metadata.lock, hash);
+		cline = ocf_metadata_get_hash(cache, hash);
+
+		while (cline != terminator) {
+			if (metadata_test_dirty(cache, cline)) {
+				cleaning_policy_alru_set_hot_cache_line(cache,
+						cline);
+			}
+
+			cline = ocf_metadata_get_collision_next(cache, cline);
+		}
+		ocf_hb_id_naked_unlock_rd(&cache->metadata.lock, hash);
+	}
+	ocf_metadata_end_shared_access(&cache->metadata.lock, lock_idx);
+
+	return 0;
+}
+
+static void ocf_alru_update_finish(ocf_parallelize_t parallelize,
+		void *priv, int error)
+{
+	struct ocf_alru_update_context *context = priv;
+	ocf_cache_t cache = context->cache;
+
+	if (error)
+		goto end;
+
+	ocf_kick_cleaner(cache);
+
+end:
+	context->cmpl(context->priv, error);
+
+	ocf_parallelize_destroy(parallelize);
+}
+
+void cleaning_policy_alru_update(ocf_cache_t cache,
+		ocf_cleaning_op_end_t cmpl, void *priv)
+{
+	struct ocf_alru_update_context *context;
+	ocf_parallelize_t parallelize;
+	int result;
+
+	result = ocf_parallelize_create(&parallelize, cache, 0,
+			sizeof(*context),
+			ocf_alru_update_handle, ocf_alru_update_finish,
+			true);
+	if (result) {
+		cmpl(priv, result);
+		return;
+	}
+
+	context = ocf_parallelize_get_priv(parallelize);
+	context->cache = cache;
+	context->cmpl = cmpl;
+	context->priv = priv;
+
+	ocf_parallelize_run(parallelize);
+}
+
 void cleaning_policy_alru_deinitialize(struct ocf_cache *cache)
 {
 	struct alru_context *alru = cache->cleaner.cleaning_policy_context;
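For reference, the chain walk in ocf_alru_update_handle visits every cache line by following collision links from each hash bucket until it reaches the terminator sentinel (one past the last valid cache line index, per the assignment from collision_table_entries above). A simplified standalone model of that traversal, with stand-in arrays in place of the OCF metadata accessors:

    #include <stdbool.h>
    #include <stdint.h>

    #define TERMINATOR UINT32_MAX

    static uint32_t bucket_head[1024]; /* hash -> first line in chain */
    static uint32_t chain_next[65536]; /* collision links */
    static bool dirty[65536];          /* per-line dirty flag */

    static unsigned count_dirty_in_bucket(uint32_t hash)
    {
            uint32_t cline = bucket_head[hash];
            unsigned n = 0;

            while (cline != TERMINATOR) {
                    if (dirty[cline])
                            n++;
                    cline = chain_next[cline];
            }
            return n;
    }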