@@ -10,6 +10,7 @@
 #include <linux/damon.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/mm.h>
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/string.h>
@@ -90,7 +91,8 @@ struct damos *damon_new_scheme(
 		unsigned long min_sz_region, unsigned long max_sz_region,
 		unsigned int min_nr_accesses, unsigned int max_nr_accesses,
 		unsigned int min_age_region, unsigned int max_age_region,
-		enum damos_action action, struct damos_quota *quota)
+		enum damos_action action, struct damos_quota *quota,
+		struct damos_watermarks *wmarks)
 {
 	struct damos *scheme;
 
@@ -122,6 +124,13 @@ struct damos *damon_new_scheme(
 	scheme->quota.charge_target_from = NULL;
 	scheme->quota.charge_addr_from = 0;
 
+	scheme->wmarks.metric = wmarks->metric;
+	scheme->wmarks.interval = wmarks->interval;
+	scheme->wmarks.high = wmarks->high;
+	scheme->wmarks.mid = wmarks->mid;
+	scheme->wmarks.low = wmarks->low;
+	scheme->wmarks.activated = true;
+
 	return scheme;
 }
 
@@ -582,6 +591,9 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
 		unsigned long sz = r->ar.end - r->ar.start;
 		struct timespec64 begin, end;
 
+		if (!s->wmarks.activated)
+			continue;
+
 		/* Check the quota */
 		if (quota->esz && quota->charged_sz >= quota->esz)
 			continue;
@@ -684,6 +696,9 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
 		unsigned long cumulated_sz;
 		unsigned int score, max_score = 0;
 
+		if (!s->wmarks.activated)
+			continue;
+
 		if (!quota->ms && !quota->sz)
 			continue;
 
@@ -924,6 +939,83 @@ static bool kdamond_need_stop(struct damon_ctx *ctx)
 	return true;
 }
 
+static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
+{
+	struct sysinfo i;
+
+	switch (metric) {
+	case DAMOS_WMARK_FREE_MEM_RATE:
+		si_meminfo(&i);
+		return i.freeram * 1000 / i.totalram;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Returns zero if the scheme is active.  Else, returns time to wait for next
+ * watermark check in micro-seconds.
+ */
+static unsigned long damos_wmark_wait_us(struct damos *scheme)
+{
+	unsigned long metric;
+
+	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
+		return 0;
+
+	metric = damos_wmark_metric_value(scheme->wmarks.metric);
+	/* higher than high watermark or lower than low watermark */
+	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
+		if (scheme->wmarks.activated)
+			pr_debug("inactivate a scheme (%d) for %s wmark\n",
+					scheme->action,
+					metric > scheme->wmarks.high ?
+					"high" : "low");
+		scheme->wmarks.activated = false;
+		return scheme->wmarks.interval;
+	}
+
+	/* inactive and higher than middle watermark */
+	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
+			!scheme->wmarks.activated)
+		return scheme->wmarks.interval;
+
+	if (!scheme->wmarks.activated)
+		pr_debug("activate a scheme (%d)\n", scheme->action);
+	scheme->wmarks.activated = true;
+	return 0;
+}
+
+static void kdamond_usleep(unsigned long usecs)
+{
+	if (usecs > 100 * 1000)
+		schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+	else
+		usleep_range(usecs, usecs + 1);
+}
+
+/* Returns negative error code if it's not activated but should return */
+static int kdamond_wait_activation(struct damon_ctx *ctx)
+{
+	struct damos *s;
+	unsigned long wait_time;
+	unsigned long min_wait_time = 0;
+
+	while (!kdamond_need_stop(ctx)) {
+		damon_for_each_scheme(s, ctx) {
+			wait_time = damos_wmark_wait_us(s);
+			if (!min_wait_time || wait_time < min_wait_time)
+				min_wait_time = wait_time;
+		}
+		if (!min_wait_time)
+			return 0;
+
+		kdamond_usleep(min_wait_time);
+	}
+	return -EBUSY;
+}
+
 static void set_kdamond_stop(struct damon_ctx *ctx)
 {
 	mutex_lock(&ctx->kdamond_lock);
@@ -952,6 +1044,9 @@ static int kdamond_fn(void *data)
 	sz_limit = damon_region_sz_limit(ctx);
 
 	while (!kdamond_need_stop(ctx)) {
+		if (kdamond_wait_activation(ctx))
+			continue;
+
 		if (ctx->primitive.prepare_access_checks)
 			ctx->primitive.prepare_access_checks(ctx);
 		if (ctx->callback.after_sampling &&
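For reference, below is a minimal sketch of how a caller could fill the new struct damos_watermarks argument of damon_new_scheme(). The thresholds, check interval, and the DAMOS_PAGEOUT action are illustrative assumptions, not taken from this commit. Note that damos_wmark_metric_value() reports the free memory rate in per-thousand of total memory (for example, 2 GiB free out of 8 GiB total gives 250), so the watermark values are permille of total memory.

	/*
	 * Illustrative values only (assumptions, not from this commit):
	 * activate the scheme once free memory falls below 15% (mid),
	 * keep it active until free memory recovers above 50% (high),
	 * and deactivate it below 5% (low) so that other reclaim
	 * mechanisms can handle extreme memory pressure instead.
	 */
	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,
		.interval = 5 * 1000 * 1000,	/* re-check every 5 s (us) */
		.high = 500,			/* permille of total memory */
		.mid = 150,
		.low = 50,
	};
	struct damos_quota quota = {};		/* no quota for this sketch */
	struct damos *scheme;

	scheme = damon_new_scheme(
			PAGE_SIZE, ULONG_MAX,	/* any region size */
			0, UINT_MAX,		/* any access frequency */
			0, UINT_MAX,		/* any region age */
			DAMOS_PAGEOUT, &quota, &wmarks);

With these values, damos_wmark_wait_us() keeps an inactive scheme inactive while the metric sits between mid and high, and keeps an active scheme active until the metric leaves the [low, high] band, so the gap between mid and high acts as hysteresis against rapid activation flapping.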