Commit df442a4

Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "21 patches.

  Subsystems affected by this patch series: MAINTAINERS, mailmap, and mm
  (mlock, pagecache, damon, slub, memcg, hugetlb, and pagecache)"

* emailed patches from Andrew Morton <[email protected]>: (21 commits)
  mm: bdi: initialize bdi_min_ratio when bdi is unregistered
  hugetlbfs: fix issue of preallocation of gigantic pages can't work
  mm/memcg: relocate mod_objcg_mlstate(), get_obj_stock() and put_obj_stock()
  mm/slub: fix endianness bug for alloc/free_traces attributes
  selftests/damon: split test cases
  selftests/damon: test debugfs file reads/writes with huge count
  selftests/damon: test wrong DAMOS condition ranges input
  selftests/damon: test DAMON enabling with empty target_ids case
  selftests/damon: skip test if DAMON is running
  mm/damon/vaddr-test: remove unnecessary variables
  mm/damon/vaddr-test: split a test function having >1024 bytes frame size
  mm/damon/vaddr: remove an unnecessary warning message
  mm/damon/core: remove unnecessary error messages
  mm/damon/dbgfs: remove an unnecessary error message
  mm/damon/core: use better timer mechanisms selection threshold
  mm/damon/core: fix fake load reports due to uninterruptible sleeps
  timers: implement usleep_idle_range()
  filemap: remove PageHWPoison check from next_uptodate_page()
  mailmap: update email address for Guo Ren
  MAINTAINERS: update kdump maintainers
  ...
2 parents 6f51352 + 3c376df commit df442a4

23 files changed: +322 −207 lines

.mailmap
Lines changed: 2 additions & 0 deletions

@@ -126,6 +126,8 @@ Greg Kroah-Hartman <[email protected]>
 Greg Kroah-Hartman <[email protected]>
 
 
+
+
 Gustavo Padovan <[email protected]>
 Gustavo Padovan <[email protected]>

MAINTAINERS
Lines changed: 1 addition & 1 deletion

@@ -10279,9 +10279,9 @@ F: lib/Kconfig.kcsan
 F:	scripts/Makefile.kcsan
 
 KDUMP
-M:	Dave Young <[email protected]>
 M:	Baoquan He <[email protected]>
 R:	Vivek Goyal <[email protected]>
+R:	Dave Young <[email protected]>
 
 S:	Maintained
 W:	http://lse.sourceforge.net/kdump/

include/linux/delay.h
Lines changed: 13 additions & 1 deletion

@@ -20,6 +20,7 @@
  */
 
 #include <linux/math.h>
+#include <linux/sched.h>
 
 extern unsigned long loops_per_jiffy;
 
@@ -58,7 +59,18 @@ void calibrate_delay(void);
 void __attribute__((weak)) calibration_delay_done(void);
 void msleep(unsigned int msecs);
 unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+			unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+	usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+	usleep_range_state(min, max, TASK_IDLE);
+}
 
 static inline void ssleep(unsigned int seconds)
 {
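
Note: a minimal usage sketch of the new helper, not part of this commit. polling_worker(), sample_once() and SAMPLE_INTERVAL_US are made-up names; the point is that usleep_idle_range() waits in TASK_IDLE, so a mostly-sleeping kernel thread is not accounted as uninterruptible ("D" state) load the way a usleep_range() caller is.

/* Illustrative only; not from this commit. */
#include <linux/delay.h>
#include <linux/kthread.h>

#define SAMPLE_INTERVAL_US	5000	/* assumed 5 ms polling period */

static void sample_once(void *data)
{
	/* placeholder for the real sampling work */
}

static int polling_worker(void *data)
{
	while (!kthread_should_stop()) {
		sample_once(data);
		/*
		 * Wait in TASK_IDLE: uninterruptible, but not counted
		 * toward the load average, unlike usleep_range().
		 */
		usleep_idle_range(SAMPLE_INTERVAL_US, SAMPLE_INTERVAL_US + 100);
	}
	return 0;
}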

include/uapi/linux/resource.h
Lines changed: 10 additions & 3 deletions

@@ -66,10 +66,17 @@ struct rlimit64 {
 #define _STK_LIM	(8*1024*1024)
 
 /*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IOURING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
 */
-#define MLOCK_LIMIT	((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT	(8*1024*1024)
 
 /*
 * Due to binary compatibility, the actual resource numbers
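
Note: a userspace sketch (not part of this commit) to observe the resulting default. On a kernel with this change the unconfigured RLIMIT_MEMLOCK soft limit is 8 MiB, but PAM or the distribution may override it, so the expected value below is an assumption.

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	/* Expect 8388608 (8 MiB) on an unconfigured system with this patch. */
	printf("RLIMIT_MEMLOCK soft=%llu hard=%llu\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_max);
	return 0;
}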

kernel/time/timer.c
Lines changed: 9 additions & 7 deletions

@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
 EXPORT_SYMBOL(msleep_interruptible);
 
 /**
- * usleep_range - Sleep for an approximate time
- * @min: Minimum time in usecs to sleep
- * @max: Maximum time in usecs to sleep
+ * usleep_range_state - Sleep for an approximate time in a given state
+ * @min: Minimum time in usecs to sleep
+ * @max: Maximum time in usecs to sleep
+ * @state: State of the current task that will be while sleeping
  *
  * In non-atomic context where the exact wakeup time is flexible, use
- * usleep_range() instead of udelay(). The sleep improves responsiveness
+ * usleep_range_state() instead of udelay(). The sleep improves responsiveness
  * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
  * power usage by allowing hrtimers to take advantage of an already-
  * scheduled interrupt instead of scheduling a new one just for this sleep.
  */
-void __sched usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range_state(unsigned long min, unsigned long max,
+				unsigned int state)
 {
	ktime_t exp = ktime_add_us(ktime_get(), min);
	u64 delta = (u64)(max - min) * NSEC_PER_USEC;
 
	for (;;) {
-		__set_current_state(TASK_UNINTERRUPTIBLE);
+		__set_current_state(state);
		/* Do not return before the requested sleep time has elapsed */
		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
			break;
	}
 }
-EXPORT_SYMBOL(usleep_range);
+EXPORT_SYMBOL(usleep_range_state);

mm/backing-dev.c
Lines changed: 7 additions & 0 deletions

@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);
 
+	/*
+	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+	 * update the global bdi_min_ratio.
+	 */
+	if (bdi->min_ratio)
+		bdi_set_min_ratio(bdi, 0);
+
	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);

mm/damon/core.c
Lines changed: 7 additions & 13 deletions

@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
	for (i = 0; i < nr_ids; i++) {
		t = damon_new_target(ids[i]);
		if (!t) {
-			pr_err("Failed to alloc damon_target\n");
			/* The caller should do cleanup of the ids itself */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
		unsigned long aggr_int, unsigned long primitive_upd_int,
		unsigned long min_nr_reg, unsigned long max_nr_reg)
 {
-	if (min_nr_reg < 3) {
-		pr_err("min_nr_regions (%lu) must be at least 3\n",
-				min_nr_reg);
+	if (min_nr_reg < 3)
		return -EINVAL;
-	}
-	if (min_nr_reg > max_nr_reg) {
-		pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
-				min_nr_reg, max_nr_reg);
+	if (min_nr_reg > max_nr_reg)
		return -EINVAL;
-	}
 
	ctx->sample_interval = sample_int;
	ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
 
 static void kdamond_usleep(unsigned long usecs)
 {
-	if (usecs > 100 * 1000)
-		schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+	/* See Documentation/timers/timers-howto.rst for the thresholds */
+	if (usecs > 20 * USEC_PER_MSEC)
+		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
-		usleep_range(usecs, usecs + 1);
+		usleep_idle_range(usecs, usecs + 1);
 }
 
 /* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
				ctx->callback.after_sampling(ctx))
			done = true;
 
-		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+		kdamond_usleep(ctx->sample_interval);
 
		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

mm/damon/dbgfs.c
Lines changed: 1 addition & 3 deletions

@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
				&wmarks.low, &parsed);
		if (ret != 18)
			break;
-		if (!damos_action_valid(action)) {
-			pr_err("wrong action %d\n", action);
+		if (!damos_action_valid(action))
			goto fail;
-		}
 
		pos += parsed;
		scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,

mm/damon/vaddr-test.h
Lines changed: 37 additions & 42 deletions

@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
		struct damon_addr_range *three_regions,
		unsigned long *expected, int nr_expected)
 {
-	struct damon_ctx *ctx = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r;
	int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
		r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
		damon_add_region(r, t);
	}
-	damon_add_target(ctx, t);
 
	damon_va_apply_three_regions(t, three_regions);
 
@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
		KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
	}
-
-	damon_destroy_ctx(ctx);
 }
 
 /*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
			new_three_regions, expected, ARRAY_SIZE(expected));
 }
 
-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+		unsigned long start, unsigned long end, unsigned int nr_pieces)
 {
-	struct damon_ctx *c = damon_new_ctx();
-	struct damon_target *t;
-	struct damon_region *r;
-	unsigned long i;
-
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
-			-EINVAL);
-
-	t = damon_new_target(42);
-	r = damon_new_region(0, 100);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+	struct damon_target *t = damon_new_target(42);
+	struct damon_region *r = damon_new_region(start, end);
 
	damon_add_region(r, t);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
-	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+	KUNIT_EXPECT_EQ(test,
+			damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
 
-	i = 0;
	damon_for_each_region(r, t) {
-		KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
-		KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+		KUNIT_EXPECT_EQ(test, r->ar.start, start);
+		KUNIT_EXPECT_EQ(test, r->ar.end, end);
	}
+
	damon_free_target(t);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+		unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+	struct damon_target *t = damon_new_target(42);
+	struct damon_region *r = damon_new_region(start, end);
+	unsigned long expected_width = (end - start) / nr_pieces;
+	unsigned long i = 0;
 
-	t = damon_new_target(42);
-	r = damon_new_region(5, 59);
	damon_add_region(r, t);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
-	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+	KUNIT_EXPECT_EQ(test,
+			damon_va_evenly_split_region(t, r, nr_pieces), 0);
+	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
 
-	i = 0;
	damon_for_each_region(r, t) {
-		if (i == 4)
+		if (i == nr_pieces - 1)
			break;
-		KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
-		KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+		KUNIT_EXPECT_EQ(test,
+				r->ar.start, start + i++ * expected_width);
+		KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
	}
-	KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
-	KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+	KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+	KUNIT_EXPECT_EQ(test, r->ar.end, end);
	damon_free_target(t);
+}
 
-	t = damon_new_target(42);
-	r = damon_new_region(5, 6);
-	damon_add_region(r, t);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
-	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+static void damon_test_split_evenly(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+			-EINVAL);
 
-	damon_for_each_region(r, t) {
-		KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
-		KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
-	}
-	damon_free_target(t);
-	damon_destroy_ctx(c);
+	damon_test_split_evenly_fail(test, 0, 100, 0);
+	damon_test_split_evenly_succ(test, 0, 100, 10);
+	damon_test_split_evenly_succ(test, 5, 59, 5);
+	damon_test_split_evenly_fail(test, 5, 6, 2);
 }
 
 static struct kunit_case damon_test_cases[] = {
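
Note: a sketch of how a case such as damon_test_split_evenly is wired into KUnit. This is the standard KUnit registration pattern, not a reproduction of the rest of vaddr-test.h, and the suite name below is illustrative only. With the relevant DAMON test Kconfig options enabled, such a suite can typically be exercised through the KUnit wrapper (tools/testing/kunit/kunit.py run).

#include <kunit/test.h>

/* defined earlier in the test header */
static void damon_test_split_evenly(struct kunit *test);

static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_split_evenly),
	{},
};

static struct kunit_suite damon_test_suite = {
	.name = "damon-vaddr",		/* illustrative name */
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);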

mm/damon/vaddr.c
Lines changed: 0 additions & 1 deletion

@@ -627,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
	case DAMOS_STAT:
		return 0;
	default:
-		pr_warn("Wrong action %d\n", scheme->action);
		return -EINVAL;
	}
