@@ -2461,13 +2461,15 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
 static DEFINE_MUTEX(memcg_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-                                   unsigned long limit)
+                                   unsigned long limit, bool memsw)
 {
         unsigned long curusage;
         unsigned long oldusage;
         bool enlarge = false;
         int retry_count;
         int ret;
+        bool limits_invariant;
+        struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
 
         /*
          * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2477,7 +2479,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
         retry_count = MEM_CGROUP_RECLAIM_RETRIES *
                       mem_cgroup_count_children(memcg);
 
-        oldusage = page_counter_read(&memcg->memory);
+        oldusage = page_counter_read(counter);
 
         do {
                 if (signal_pending(current)) {
@@ -2486,73 +2488,28 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                 }
 
                 mutex_lock(&memcg_limit_mutex);
-                if (limit > memcg->memsw.limit) {
-                        mutex_unlock(&memcg_limit_mutex);
-                        ret = -EINVAL;
-                        break;
-                }
-                if (limit > memcg->memory.limit)
-                        enlarge = true;
-                ret = page_counter_limit(&memcg->memory, limit);
-                mutex_unlock(&memcg_limit_mutex);
-
-                if (!ret)
-                        break;
-
-                try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
-
-                curusage = page_counter_read(&memcg->memory);
-                /* Usage is reduced ? */
-                if (curusage >= oldusage)
-                        retry_count--;
-                else
-                        oldusage = curusage;
-        } while (retry_count);
-
-        if (!ret && enlarge)
-                memcg_oom_recover(memcg);
-
-        return ret;
-}
-
-static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
-                                         unsigned long limit)
-{
-        unsigned long curusage;
-        unsigned long oldusage;
-        bool enlarge = false;
-        int retry_count;
-        int ret;
-
-        /* see mem_cgroup_resize_res_limit */
-        retry_count = MEM_CGROUP_RECLAIM_RETRIES *
-                      mem_cgroup_count_children(memcg);
-
-        oldusage = page_counter_read(&memcg->memsw);
-
-        do {
-                if (signal_pending(current)) {
-                        ret = -EINTR;
-                        break;
-                }
-
-                mutex_lock(&memcg_limit_mutex);
-                if (limit < memcg->memory.limit) {
+                /*
+                 * Make sure that the new limit (memsw or memory limit) doesn't
+                 * break our basic invariant rule memory.limit <= memsw.limit.
+                 */
+                limits_invariant = memsw ? limit >= memcg->memory.limit :
+                                           limit <= memcg->memsw.limit;
+                if (!limits_invariant) {
                         mutex_unlock(&memcg_limit_mutex);
                         ret = -EINVAL;
                         break;
                 }
-                if (limit > memcg->memsw.limit)
+                if (limit > counter->limit)
                         enlarge = true;
-                ret = page_counter_limit(&memcg->memsw, limit);
+                ret = page_counter_limit(counter, limit);
                 mutex_unlock(&memcg_limit_mutex);
 
                 if (!ret)
                         break;
 
-                try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
+                try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, !memsw);
 
-                curusage = page_counter_read(&memcg->memsw);
+                curusage = page_counter_read(counter);
                 /* Usage is reduced ? */
                 if (curusage >= oldusage)
                         retry_count--;
@@ -3014,10 +2971,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
         }
         switch (MEMFILE_TYPE(of_cft(of)->private)) {
         case _MEM:
-                ret = mem_cgroup_resize_limit(memcg, nr_pages);
+                ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
                 break;
         case _MEMSWAP:
-                ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
+                ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
                 break;
         case _KMEM:
                 ret = memcg_update_kmem_limit(memcg, nr_pages);
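How this path is reached from userspace (cgroup v1): a write to memory.limit_in_bytes takes the _MEM branch above (memsw == false), and a write to memory.memsw.limit_in_bytes takes the _MEMSWAP branch (memsw == true); the latter file only exists when swap accounting is enabled. Below is a minimal userspace sketch, not part of the patch, that exercises the invariant the unified helper enforces; the mount point /sys/fs/cgroup/memory and the child group "demo" are assumed for illustration, and the program needs the privileges to write those files. Setting the memory limit above the current memsw limit is rejected with -EINVAL.

/*
 * Illustrative userspace sketch (not part of the patch). Assumes a
 * cgroup v1 memory controller mounted at /sys/fs/cgroup/memory and a
 * pre-created child group "demo"; both paths are hypothetical.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a decimal byte count into a memcg control file; return 0 or -errno. */
static int write_limit(const char *path, const char *bytes)
{
        int fd = open(path, O_WRONLY);
        if (fd < 0)
                return -errno;
        ssize_t n = write(fd, bytes, strlen(bytes));
        int err = (n < 0) ? -errno : 0;
        close(fd);
        return err;
}

int main(void)
{
        const char *mem = "/sys/fs/cgroup/memory/demo/memory.limit_in_bytes";
        const char *memsw = "/sys/fs/cgroup/memory/demo/memory.memsw.limit_in_bytes";

        /* memsw limit must stay >= memory limit, so lower it first. */
        printf("memsw -> 256M: %d\n", write_limit(memsw, "268435456"));
        /* Within the invariant: memory limit below the memsw limit. */
        printf("mem   -> 128M: %d\n", write_limit(mem, "134217728"));
        /* Violates memory.limit <= memsw.limit: the kernel returns -EINVAL. */
        printf("mem   -> 512M: %d (expect -EINVAL)\n", write_limit(mem, "536870912"));
        return 0;
}

Note that passing !memsw to try_to_free_mem_cgroup_pages() preserves the old behaviour of the two separate functions: reclaim may swap when shrinking the plain memory limit, but not when shrinking the combined memory+swap limit.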