@@ -68,11 +68,21 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
 	}
 }
 
+#define __entries_not_adjacent(__req, __i, __j) \
+		__req->map[__i].coll_idx + 1 != __req->map[__j].coll_idx
+
+#define __is_the_last_chunk(__req, __i) (__i == (__req->core_line_count - 1))
+
+#define __skip_on_the_last_entry(__skip, __req, __i) \
+		__skip * (int)__is_the_last_chunk(__req, __i)
+
 static int _ocf_backfill_do(struct ocf_request *req)
 {
 	ocf_cache_t cache = req->cache;
-	uint64_t addr, bytes, total_bytes = 0, addr_next = 0;
-	uint64_t seek, skip;
+	uint64_t metadata_offset = cache->device->metadata_offset;
+	ocf_cache_line_size_t cache_line_size = ocf_line_size(cache);
+	uint64_t addr, bytes, total_bytes = 0;
+	uint64_t seek, skip, last_chunk_size;
 	uint32_t i;
 
 	req->data = req->cp_data;
@@ -84,9 +94,9 @@ static int _ocf_backfill_do(struct ocf_request *req)
 	req->cache_forward_end = _ocf_backfill_complete;
 
 	if (ocf_engine_is_sequential(req)) {
-		addr = cache->device->metadata_offset;
-		addr += req->map[0].coll_idx * ocf_line_size(cache);
-		addr += req->addr % ocf_line_size(cache);
+		addr = metadata_offset;
+		addr += req->map[0].coll_idx * cache_line_size;
+		addr += req->addr % cache_line_size;
 
 		ocf_core_stats_cache_block_update(req->core, req->part_id,
 				OCF_WRITE, req->bytes);
@@ -96,49 +106,40 @@ static int _ocf_backfill_do(struct ocf_request *req)
 		return 0;
 	}
 
+	seek = req->addr % cache_line_size;
+	last_chunk_size = (req->addr + req->bytes) % cache_line_size;
+	skip = (cache_line_size - last_chunk_size) % cache_line_size;
+
 	ocf_req_forward_cache_get(req);
 	for (i = 0; i < req->core_line_count; i++) {
-		if (addr_next) {
-			addr = addr_next;
-		} else {
-			addr = req->map[i].coll_idx;
-			addr *= ocf_line_size(cache);
-			addr += cache->device->metadata_offset;
-		}
-		bytes = ocf_line_size(cache);
-
-		if (i == 0) {
-			seek = req->addr % ocf_line_size(cache);
-			addr += seek;
-			bytes -= seek;
-		}
-
 		if (req->map[i].status == LOOKUP_HIT) {
-			/* This is the 1st cache line in the interval,
-			 * and it's a hit. Don't write it to the cache.
-			 */
-			addr_next = 0;
-			total_bytes += bytes;
+			/* This is the 1st cache line in the interval, and it's
+			   a hit. Don't write it to the cache */
+			total_bytes += cache_line_size;
+			total_bytes -= seek;
+			/* Seek should be taken into account for the first chunk
+			   only */
+			seek = 0;
 			continue;
 		}
 
-		for (; i < (req->core_line_count - 1); i++) {
-			addr_next = req->map[i + 1].coll_idx;
-			addr_next *= ocf_line_size(cache);
-			addr_next += cache->device->metadata_offset;
+		addr = metadata_offset;
+		addr += req->map[i].coll_idx * cache_line_size;
 
-			if (addr_next != (addr + bytes))
+		bytes = cache_line_size;
+		for (; i < (req->core_line_count - 1); i++) {
+			if (__entries_not_adjacent(req, i, i + 1))
 				break;
 
-			bytes += ocf_line_size(cache);
+			bytes += cache_line_size;
 		}
 
-		if (i == (req->core_line_count - 1)) {
-			skip = (ocf_line_size(cache) -
-					((req->addr + req->bytes) %
-					ocf_line_size(cache))) % ocf_line_size(cache);
-			bytes -= skip;
-		}
+		/* Seek should be taken into account for the first chunk only */
+		addr += seek;
+		bytes -= seek;
+		seek = 0;
+
+		bytes -= __skip_on_the_last_entry(skip, req, i);
 
 		bytes = OCF_MIN(bytes, req->bytes - total_bytes);
 
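The rewrite hoists the seek/skip arithmetic out of the loop. seek is the request's byte offset into its first cache line, and skip is the unused tail of its last one: last_chunk_size is how far the request reaches into the final line, and the outer % cache_line_size collapses skip to 0 when the request ends exactly on a line boundary. With 4 KiB lines, for example, a request starting 512 B into a line and ending 3072 B into its last line gives seek = 512 and skip = (4096 - 3072) % 4096 = 1024. __skip_on_the_last_entry then applies skip branchlessly: the multiply by (int)__is_the_last_chunk(req, i) yields skip on the final chunk and 0 on every earlier one, which is what replaces the old i == (req->core_line_count - 1) block.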
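Below is a minimal standalone sketch of the same chunk walk, outside OCF. The 4 KiB line size, the fabricated coll_idx array, the zero metadata offset, and the struct-free versions of the macros are assumptions for illustration only; the LOOKUP_HIT branch and the OCF request machinery are omitted.

/* Standalone sketch (not OCF code) of the backfill chunk walk above. */
#include <stdint.h>
#include <stdio.h>

#define LINE_SIZE 4096ULL /* stands in for ocf_line_size(cache) */

/* Same shape as the macros introduced by the patch, minus struct access */
#define entries_not_adjacent(map, i, j) ((map)[i] + 1 != (map)[j])
#define is_the_last_chunk(n, i) ((i) == ((n) - 1))
#define skip_on_the_last_entry(s, n, i) ((s) * (uint64_t)is_the_last_chunk(n, i))

int main(void)
{
	/* Fabricated request: starts 512 B into its first line, ends 1 KiB
	 * short of its last line's end; lines 0 and 1 are adjacent in the
	 * cache, line 2 is not. */
	uint64_t req_addr = 512;
	uint64_t req_bytes = 3 * LINE_SIZE - 512 - 1024;
	uint64_t coll_idx[] = { 10, 11, 20 };
	uint32_t n = 3, i;
	uint64_t addr, bytes, total_bytes = 0;
	uint64_t seek, skip, last_chunk_size;

	seek = req_addr % LINE_SIZE;                          /* 512 */
	last_chunk_size = (req_addr + req_bytes) % LINE_SIZE; /* 3072 */
	skip = (LINE_SIZE - last_chunk_size) % LINE_SIZE;     /* 1024 */

	for (i = 0; i < n; i++) {
		addr = coll_idx[i] * LINE_SIZE;

		/* Merge a run of adjacent cache lines into one I/O */
		bytes = LINE_SIZE;
		for (; i < n - 1; i++) {
			if (entries_not_adjacent(coll_idx, i, i + 1))
				break;
			bytes += LINE_SIZE;
		}

		/* Seek applies to the first chunk only */
		addr += seek;
		bytes -= seek;
		seek = 0;

		/* skip is subtracted only when i landed on the last entry */
		bytes -= skip_on_the_last_entry(skip, n, i);

		total_bytes += bytes;
		printf("io: addr=%llu bytes=%llu\n",
				(unsigned long long)addr,
				(unsigned long long)bytes);
	}

	/* The issued I/Os cover exactly the requested byte count */
	printf("total=%llu req=%llu\n", (unsigned long long)total_bytes,
			(unsigned long long)req_bytes);
	return 0;
}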
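Compiled as written, the sketch prints io: addr=41472 bytes=7680 (lines 10 and 11 merged into a single I/O, with seek applied) followed by io: addr=81920 bytes=3072 (with skip applied), and total equals req_bytes. Subtracting an unconditional zero on all non-final chunks is the trade that lets the patch drop the separate end-of-request branch from the loop body.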