87 | 87 | * The current io_unit accepting new stripes is always at the end of the list. |
88 | 88 | */ |
89 | 89 |
| 90 | +#define PPL_SPACE_SIZE (128 * 1024) |
| 91 | + |
90 | 92 | struct ppl_conf { |
91 | 93 | struct mddev *mddev; |
92 | 94 |
@@ -122,6 +124,10 @@ struct ppl_log { |
122 | 124 | * always at the end of io_list */ |
123 | 125 | spinlock_t io_list_lock; |
124 | 126 | struct list_head io_list; /* all io_units of this log */ |
| 127 | + |
| 128 | + sector_t next_io_sector; |
| 129 | + unsigned int entry_space; |
| 130 | + bool use_multippl; |
125 | 131 | }; |
126 | 132 |
127 | 133 | #define PPL_IO_INLINE_BVECS 32 |
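
The three fields added to struct ppl_log above carry all of the per-device multi-PPL state, but the patch adds them without comments. Below is a commented user-space mirror, based purely on how the rest of this change uses them; the example_ names and the stand-in sector_t typedef are illustrative, not kernel definitions:

/*
 * Commented user-space mirror of the new ppl_log fields and of
 * PPL_SPACE_SIZE, describing how the rest of this patch uses them.
 * Only a sketch; the real definitions live in raid5-ppl.c.
 */
#include <stdbool.h>

typedef unsigned long long sector_t;        /* stand-in for the kernel type */

#define EXAMPLE_PPL_SPACE_SIZE (128 * 1024) /* partial parity per PPL, bytes */

struct example_ppl_log_state {
	sector_t next_io_sector;   /* sector inside rdev->ppl where the next
	                            * io_unit will be written; rewound to
	                            * ppl.sector when the area wraps */
	unsigned int entry_space;  /* bytes of partial parity one io_unit may
	                            * accumulate before a new one is started */
	bool use_multippl;         /* true when the PPL area is big enough to
	                            * hold more than one header + entry_space */
};
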
@@ -264,13 +270,12 @@ static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh) |
264 | 270 | int i; |
265 | 271 | sector_t data_sector = 0; |
266 | 272 | int data_disks = 0; |
267 | | - unsigned int entry_space = (log->rdev->ppl.size << 9) - PPL_HEADER_SIZE; |
268 | 273 | struct r5conf *conf = sh->raid_conf; |
269 | 274 |
270 | 275 | pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector); |
271 | 276 |
272 | 277 | /* check if current io_unit is full */ |
273 | | - if (io && (io->pp_size == entry_space || |
| 278 | + if (io && (io->pp_size == log->entry_space || |
274 | 279 | io->entries_count == PPL_HDR_MAX_ENTRIES)) { |
275 | 280 | pr_debug("%s: add io_unit blocked by seq: %llu\n", |
276 | 281 | __func__, io->seq); |
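
Caching the entry space in the log means the full-check above no longer recomputes it from rdev->ppl.size on every stripe, and it now reflects whichever layout is in use: 128 KiB with multiple PPLs, or the whole area minus one header with a single PPL. A minimal user-space model of the check, with a placeholder constant standing in for PPL_HDR_MAX_ENTRIES:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_HDR_MAX_ENTRIES 64	/* placeholder; not the real PPL_HDR_MAX_ENTRIES */

struct example_io_unit {
	unsigned int pp_size;		/* bytes of partial parity queued so far */
	unsigned int entries_count;	/* PPL header entries used so far */
};

/* Models the "current io_unit is full" test in ppl_log_stripe(). */
static bool io_unit_full(const struct example_io_unit *io, unsigned int entry_space)
{
	return io->pp_size == entry_space ||
	       io->entries_count == EXAMPLE_HDR_MAX_ENTRIES;
}

int main(void)
{
	struct example_io_unit io = { .pp_size = 128 * 1024, .entries_count = 3 };

	/* Full with the multi-PPL entry_space (PPL_SPACE_SIZE = 128 KiB)... */
	printf("multi-PPL:  full=%d\n", io_unit_full(&io, 128 * 1024));
	/* ...but not with a larger single-PPL entry_space, e.g. a 512 KiB
	 * area minus a 4 KiB header. */
	printf("single-PPL: full=%d\n", io_unit_full(&io, 508 * 1024));
	return 0;
}

Assuming 4 KiB pages, the 128 KiB multi-PPL entry space caps an io_unit at 32 partial-parity pages, unless PPL_HDR_MAX_ENTRIES is reached first.
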
@@ -451,12 +456,25 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) |
451 | 456 | pplhdr->entries_count = cpu_to_le32(io->entries_count); |
452 | 457 | pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE)); |
453 | 458 |
| 459 | + /* Rewind the buffer if current PPL is larger than remaining space */ |
| 460 | + if (log->use_multippl && |
| 461 | + log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector < |
| 462 | + (PPL_HEADER_SIZE + io->pp_size) >> 9) |
| 463 | + log->next_io_sector = log->rdev->ppl.sector; |
| 464 | + |
| 465 | + |
454 | 466 | bio->bi_end_io = ppl_log_endio; |
455 | 467 | bio->bi_opf = REQ_OP_WRITE | REQ_FUA; |
456 | 468 | bio->bi_bdev = log->rdev->bdev; |
457 | | - bio->bi_iter.bi_sector = log->rdev->ppl.sector; |
| 469 | + bio->bi_iter.bi_sector = log->next_io_sector; |
458 | 470 | bio_add_page(bio, io->header_page, PAGE_SIZE, 0); |
459 | 471 |
| 472 | + pr_debug("%s: log->current_io_sector: %llu\n", __func__, |
| 473 | + (unsigned long long)log->next_io_sector); |
| 474 | + |
| 475 | + if (log->use_multippl) |
| 476 | + log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9; |
| 477 | + |
460 | 478 | list_for_each_entry(sh, &io->stripe_list, log_list) { |
461 | 479 | /* entries for full stripe writes have no partial parity */ |
462 | 480 | if (test_bit(STRIPE_FULL_WRITE, &sh->state)) |
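
The hunk above is the core of the multi-PPL layout: instead of writing every PPL at rdev->ppl.sector, each io_unit is written at next_io_sector, the cursor advances by the size of the header plus partial parity (converted to 512-byte sectors), and it rewinds to the start of the area when the next io_unit would not fit. A runnable user-space sketch of that arithmetic with invented geometry (the sector numbers and sizes below are example values only):

/*
 * User-space sketch of the next_io_sector bookkeeping added above.
 * ppl.sector and ppl.size are in 512-byte sectors; the header plus
 * partial parity (bytes) is shifted by 9 before comparing against the
 * space left in the PPL area.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

#define EXAMPLE_PPL_HEADER_SIZE 4096	/* assumed 4 KiB header per PPL write */

struct example_log {
	sector_t ppl_sector;		/* start of the PPL area */
	sector_t ppl_size;		/* size of the PPL area, in sectors */
	sector_t next_io_sector;	/* where the next PPL write lands */
};

/* Return the sector this io_unit is written to and advance the cursor. */
static sector_t place_io(struct example_log *log, unsigned int pp_size)
{
	sector_t io_sectors = (EXAMPLE_PPL_HEADER_SIZE + pp_size) >> 9;
	sector_t sector;

	/* Rewind to the start if the io_unit does not fit in what is left. */
	if (log->ppl_sector + log->ppl_size - log->next_io_sector < io_sectors)
		log->next_io_sector = log->ppl_sector;

	sector = log->next_io_sector;
	log->next_io_sector += io_sectors;
	return sector;
}

int main(void)
{
	/* 1 MiB PPL area starting at sector 16 (example numbers only). */
	struct example_log log = { 16, 2048, 16 };

	for (int i = 0; i < 10; i++)
		printf("io %d -> sector %llu\n", i, place_io(&log, 128 * 1024));
	return 0;
}

With this example geometry each 128 KiB io_unit occupies 264 sectors, so seven of them fit in the 2048-sector area before the cursor rewinds to sector 16.
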
@@ -1031,6 +1049,7 @@ static int ppl_load(struct ppl_conf *ppl_conf) |
1031 | 1049 | static void __ppl_exit_log(struct ppl_conf *ppl_conf) |
1032 | 1050 | { |
1033 | 1051 | clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags); |
| 1052 | + clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags); |
1034 | 1053 |
1035 | 1054 | kfree(ppl_conf->child_logs); |
1036 | 1055 |
@@ -1099,6 +1118,22 @@ static int ppl_validate_rdev(struct md_rdev *rdev) |
1099 | 1118 | return 0; |
1100 | 1119 | } |
1101 | 1120 |
| 1121 | +static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev) |
| 1122 | +{ |
| 1123 | + if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE + |
| 1124 | + PPL_HEADER_SIZE) * 2) { |
| 1125 | + log->use_multippl = true; |
| 1126 | + set_bit(MD_HAS_MULTIPLE_PPLS, |
| 1127 | + &log->ppl_conf->mddev->flags); |
| 1128 | + log->entry_space = PPL_SPACE_SIZE; |
| 1129 | + } else { |
| 1130 | + log->use_multippl = false; |
| 1131 | + log->entry_space = (log->rdev->ppl.size << 9) - |
| 1132 | + PPL_HEADER_SIZE; |
| 1133 | + } |
| 1134 | + log->next_io_sector = rdev->ppl.sector; |
| 1135 | +} |
| 1136 | + |
1102 | 1137 | int ppl_init_log(struct r5conf *conf) |
1103 | 1138 | { |
1104 | 1139 | struct ppl_conf *ppl_conf; |
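
ppl_init_child_log() is also where PPL_SPACE_SIZE earns its value: multiple PPLs are enabled only if the reserved area can hold at least two maximal PPLs (one header plus 128 KiB of partial parity each); anything smaller falls back to the old single-PPL layout, where the entry space is the whole area minus one header. A user-space sketch of the same threshold arithmetic, assuming the 4 KiB PPL_HEADER_SIZE from md_p.h:

/*
 * Sketch of the threshold in ppl_init_child_log().  rdev->ppl.size is
 * in 512-byte sectors, so it is converted to bytes with "<< 9" before
 * the comparison.  Constants prefixed EXAMPLE_ are assumptions here.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PPL_SPACE_SIZE  (128 * 1024)
#define EXAMPLE_PPL_HEADER_SIZE 4096

static void init_child_log(unsigned long long ppl_size_sectors)
{
	bool use_multippl;
	unsigned int entry_space;

	if ((ppl_size_sectors << 9) >=
	    (EXAMPLE_PPL_SPACE_SIZE + EXAMPLE_PPL_HEADER_SIZE) * 2) {
		use_multippl = true;
		entry_space = EXAMPLE_PPL_SPACE_SIZE;
	} else {
		use_multippl = false;
		entry_space = (ppl_size_sectors << 9) - EXAMPLE_PPL_HEADER_SIZE;
	}

	printf("%llu sectors: multippl=%d entry_space=%u\n",
	       ppl_size_sectors, use_multippl, entry_space);
}

int main(void)
{
	init_child_log(512);	/* 256 KiB -> below the threshold, single PPL */
	init_child_log(528);	/* 264 KiB -> exactly (128K + 4K) * 2, multi  */
	init_child_log(2048);	/* 1 MiB   -> comfortably multi-PPL           */
	return 0;
}

Under that assumption the multi-PPL layout requires at least 264 KiB (528 sectors) of PPL space per member device.
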
@@ -1196,6 +1231,7 @@ int ppl_init_log(struct r5conf *conf) |
1196 | 1231 | q = bdev_get_queue(rdev->bdev); |
1197 | 1232 | if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) |
1198 | 1233 | need_cache_flush = true; |
| 1234 | + ppl_init_child_log(log, rdev); |
1199 | 1235 | } |
1200 | 1236 | } |
1201 | 1237 |
@@ -1261,6 +1297,7 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add) |
1261 | 1297 | if (!ret) { |
1262 | 1298 | log->rdev = rdev; |
1263 | 1299 | ret = ppl_write_empty_header(log); |
| 1300 | + ppl_init_child_log(log, rdev); |
1264 | 1301 | } |
1265 | 1302 | } else { |
1266 | 1303 | log->rdev = NULL; |