@@ -8,6 +8,49 @@
 
 struct xfs_scrub;
 
+struct xchk_relax {
+	unsigned long		next_resched;
+	unsigned int		resched_nr;
+	bool			interruptible;
+};
+
+/* Yield to the scheduler at most 10x per second. */
+#define XCHK_RELAX_NEXT	(jiffies + (HZ / 10))
+
+#define INIT_XCHK_RELAX \
+	(struct xchk_relax){ \
+		.next_resched	= XCHK_RELAX_NEXT, \
+		.resched_nr	= 0, \
+		.interruptible	= true, \
+	}
+
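A note on the deadline arithmetic: XCHK_RELAX_NEXT yields a jiffies
timestamp HZ/10 ticks in the future, i.e. 100ms regardless of the
kernel's tick rate (with CONFIG_HZ=1000 that is 100 jiffies; with
CONFIG_HZ=250 it is 25). A minimal sketch of arming a widget,
illustrative rather than part of the patch:

	/* Arm a relax widget; the first yield can happen 100ms from now. */
	struct xchk_relax widget = INIT_XCHK_RELAX;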
+/*
+ * Relax during a scrub operation and exit if there's a fatal signal pending.
+ *
+ * If preemption is disabled, we need to yield to the scheduler every now and
+ * then so that we don't run afoul of the soft lockup watchdog or RCU stall
+ * detector. cond_resched calls are somewhat expensive (~5ns) so we want to
+ * ratelimit this to 10x per second. Amortize the cost of the other checks by
+ * only doing it once every 100 calls.
+ */
+static inline int xchk_maybe_relax(struct xchk_relax *widget)
+{
+	/* Amortize the cost of scheduling and checking signals. */
+	if (likely(++widget->resched_nr < 100))
+		return 0;
+	widget->resched_nr = 0;
+
+	if (unlikely(time_after_eq(jiffies, widget->next_resched))) {
+		cond_resched();
+		widget->next_resched = XCHK_RELAX_NEXT;
+	}
+
+	if (widget->interruptible && fatal_signal_pending(current))
+		return -EINTR;
+
+	return 0;
+}
+
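To show how the widget is meant to be driven, here is a hedged sketch of
a long-running scan loop; xchk_walk_one_record() is a hypothetical
stand-in for the real per-record work, not a function in this patch:

	/* Illustrative: yield and poll for fatal signals while scanning. */
	static int xchk_walk_records(struct xfs_scrub *sc)
	{
		int			error;

		while (xchk_walk_one_record(sc)) {	/* hypothetical helper */
			error = xchk_maybe_relax(&sc->relax);
			if (error)
				return error;	/* -EINTR after a fatal signal */
		}
		return 0;
	}

Only one call in a hundred pays for the jiffies comparison and the
signal check; the other 99 cost a single increment and a predictable
branch.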
 /*
  * Standard flags for allocating memory within scrub. NOFS context is
  * configured by the process allocation scope. Scrub and repair must be able
@@ -123,6 +166,9 @@ struct xfs_scrub { |
 	 */
 	unsigned int		sick_mask;
 
+	/* next time we want to cond_resched() */
+	struct xchk_relax	relax;
+
 	/* State tracking for single-AG operations. */
 	struct xchk_ag		sa;
 };
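Embedding the widget in the per-operation struct xfs_scrub implies that
the setup path re-arms it once per scrub call; a minimal sketch of that
arming, with the function name assumed rather than taken from the patch:

	/* Hypothetical setup hook; the real arming site is outside this hunk. */
	static void xchk_arm_relax(struct xfs_scrub *sc)
	{
		sc->relax = INIT_XCHK_RELAX;
	}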
@@ -167,6 +213,24 @@ struct xfs_scrub_subord *xchk_scrub_create_subord(struct xfs_scrub *sc, |
 			unsigned int subtype);
 void xchk_scrub_free_subord(struct xfs_scrub_subord *sub);
 
+/*
+ * We /could/ terminate a scrub/repair operation early. If we're not
+ * in a good place to continue (fatal signal, etc.) then bail out.
+ * Note that we're careful not to make any judgements about *error.
+ */
+static inline bool
+xchk_should_terminate(
+	struct xfs_scrub	*sc,
+	int			*error)
+{
+	if (xchk_maybe_relax(&sc->relax)) {
+		if (*error == 0)
+			*error = -EINTR;
+		return true;
+	}
+	return false;
+}
+
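Callers are expected to poll this from their main loops. A hedged
example of the usual call-site pattern, where the record iterator is
hypothetical:

	int			error = 0;

	while (xchk_more_records(sc)) {		/* hypothetical iterator */
		if (xchk_should_terminate(sc, &error))
			break;
		/* ...check one record... */
	}
	return error;

Because the helper only stores -EINTR when *error is still zero, an
error recorded by earlier checks survives the early bailout intact.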
 /* Metadata scrubbers */
 int xchk_tester(struct xfs_scrub *sc);
 int xchk_superblock(struct xfs_scrub *sc);