
Commit c36f6e6

Hugh Dickins authored and akpm00 committed
mempolicy trivia: slightly more consistent naming
Before getting down to work, do a little cleanup, mainly of inconsistent
variable naming. I gave up trying to rationalize mpol versus pol versus
policy, and node versus nid, but let's avoid p and nd. Remove a few
superfluous blank lines, but add one; and here prefer vma->vm_policy to
vma_policy(vma) - the latter being appropriate in other sources, which
have to allow for !CONFIG_NUMA.

That intriguing line about KERNEL_DS? should have gone in v2.6.15, when
numa_policy_init() stopped using set_mempolicy(2)'s system call handler.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Hugh Dickins <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: "Huang, Ying" <[email protected]>
Cc: Kefeng Wang <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Nhat Pham <[email protected]>
Cc: Sidhartha Kumar <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Vishal Moola (Oracle) <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Yosry Ahmed <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 7f1ee4e commit c36f6e6
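
Editor's note on the vma->vm_policy preference above: vma_policy() has to exist even when NUMA is not configured, which is why NUMA-agnostic callers keep the wrapper while mm/mempolicy.c (compiled only with CONFIG_NUMA) can dereference the field directly. A minimal userspace sketch of that pattern, with simplified types - an illustration, not the kernel header verbatim:

    /* Sketch: vma_policy() degrades to NULL without NUMA, so generic
     * callers need no #ifdef; NUMA-only code can use vm_policy itself. */
    #include <stdio.h>

    #define CONFIG_NUMA 1

    struct mempolicy { int mode; };
    struct vm_area_struct { struct mempolicy *vm_policy; };

    #ifdef CONFIG_NUMA
    #define vma_policy(vma) ((vma)->vm_policy)
    #else
    #define vma_policy(vma) NULL    /* no policies without NUMA */
    #endif

    int main(void)
    {
            struct mempolicy pol = { .mode = 1 };
            struct vm_area_struct vma = { .vm_policy = &pol };
            struct mempolicy *p = vma_policy(&vma);

            printf("have policy? %s\n", p ? "yes" : "no");
            return 0;
    }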

File tree

2 files changed: +38 -46 lines changed


include/linux/mempolicy.h

Lines changed: 5 additions & 6 deletions

@@ -124,10 +124,9 @@ struct shared_policy {
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
-int mpol_set_shared_policy(struct shared_policy *info,
-                               struct vm_area_struct *vma,
-                               struct mempolicy *new);
-void mpol_free_shared_policy(struct shared_policy *p);
+int mpol_set_shared_policy(struct shared_policy *sp,
+                          struct vm_area_struct *vma, struct mempolicy *mpol);
+void mpol_free_shared_policy(struct shared_policy *sp);
 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
 
@@ -191,7 +190,7 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
        return true;
 }
 
-static inline void mpol_put(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *pol)
 {
 }
 
@@ -210,7 +209,7 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp,
 {
 }
 
-static inline void mpol_free_shared_policy(struct shared_policy *p)
+static inline void mpol_free_shared_policy(struct shared_policy *sp)
 {
 }
 
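Editor's note: the empty static inline bodies in the second and third hunks are the !CONFIG_NUMA stubs, which let generic callers invoke mpol_put() and friends unconditionally. A hedged userspace reduction of the pattern, including the fast-path wrapper that pairs with __mpol_put() in mm/mempolicy.c (bodies simplified, not the kernel header verbatim):

    /* With NUMA, mpol_put() forwards to the real slow path only for a
     * non-NULL policy; without it, the stub compiles away entirely. */
    #include <stdio.h>

    #define CONFIG_NUMA 1

    struct mempolicy { int refcnt; };

    #ifdef CONFIG_NUMA
    static void __mpol_put(struct mempolicy *pol)
    {
            printf("slow-path put, refcnt %d\n", pol->refcnt);
    }

    static inline void mpol_put(struct mempolicy *pol)
    {
            if (pol)
                    __mpol_put(pol);
    }
    #else
    static inline void mpol_put(struct mempolicy *pol)
    {
            /* no-op stub: callers need no #ifdef of their own */
    }
    #endif

    int main(void)
    {
            struct mempolicy pol = { .refcnt = 1 };

            mpol_put(&pol);
            mpol_put(NULL);         /* safe in either configuration */
            return 0;
    }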
mm/mempolicy.c

Lines changed: 33 additions & 40 deletions

@@ -25,7 +25,7 @@
  * to the last. It would be better if bind would truly restrict
  * the allocation to memory nodes instead
  *
- * preferred       Try a specific node first before normal fallback.
+ * preferred      Try a specific node first before normal fallback.
  *                As a special case NUMA_NO_NODE here means do the allocation
  *                on the local CPU. This is normally identical to default,
  *                but useful to set in a VMA when you have a non default
@@ -52,7 +52,7 @@
  * on systems with highmem kernel lowmem allocation don't get policied.
  * Same with GFP_DMA allocations.
  *
- * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
+ * For shmem/tmpfs shared memory the policy is shared between
  * all users and remembered even when nobody has memory mapped.
  */
 
@@ -291,6 +291,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                        return ERR_PTR(-EINVAL);
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
+
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
@@ -303,11 +304,11 @@
 }
 
 /* Slow path of a mpol destructor. */
-void __mpol_put(struct mempolicy *p)
+void __mpol_put(struct mempolicy *pol)
 {
-       if (!atomic_dec_and_test(&p->refcnt))
+       if (!atomic_dec_and_test(&pol->refcnt))
                return;
-       kmem_cache_free(policy_cache, p);
+       kmem_cache_free(policy_cache, pol);
 }
 
 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
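
Editor's note: atomic_dec_and_test() returns true only for the caller whose decrement reaches zero, so __mpol_put() frees the policy exactly once however many references are dropped concurrently. A hedged C11 userspace analogue of that contract, with the slab calls replaced by malloc/free:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj { atomic_int refcnt; };

    /* Like kernel atomic_dec_and_test(): true if the new value is zero. */
    static int dec_and_test(atomic_int *v)
    {
            return atomic_fetch_sub(v, 1) == 1;
    }

    static void obj_put(struct obj *p)
    {
            if (!dec_and_test(&p->refcnt))
                    return;
            free(p);
            printf("freed\n");
    }

    int main(void)
    {
            struct obj *p = malloc(sizeof(*p));

            atomic_init(&p->refcnt, 2);
            obj_put(p);     /* refcnt 2 -> 1: nothing happens */
            obj_put(p);     /* refcnt 1 -> 0: freed exactly once */
            return 0;
    }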
@@ -364,7 +365,6 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
  *
  * Called with task's alloc_lock held.
  */
-
 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 {
        mpol_rebind_policy(tsk->mempolicy, new);
@@ -375,7 +375,6 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
  *
  * Call holding a reference to mm. Takes mm->mmap_lock during call.
  */
-
 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 {
        struct vm_area_struct *vma;
@@ -757,7 +756,7 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
  * This must be called with the mmap_lock held for writing.
  */
 static int vma_replace_policy(struct vm_area_struct *vma,
-                                               struct mempolicy *pol)
+                               struct mempolicy *pol)
 {
        int err;
        struct mempolicy *old;
@@ -800,7 +799,7 @@ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
                vmstart = vma->vm_start;
        }
 
-       if (mpol_equal(vma_policy(vma), new_pol)) {
+       if (mpol_equal(vma->vm_policy, new_pol)) {
                *prev = vma;
                return 0;
        }
@@ -855,18 +854,18 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
  *
  * Called with task's alloc_lock held
  */
-static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
+static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
 {
        nodes_clear(*nodes);
-       if (p == &default_policy)
+       if (pol == &default_policy)
                return;
 
-       switch (p->mode) {
+       switch (pol->mode) {
        case MPOL_BIND:
        case MPOL_INTERLEAVE:
        case MPOL_PREFERRED:
        case MPOL_PREFERRED_MANY:
-               *nodes = p->nodes;
+               *nodes = pol->nodes;
                break;
        case MPOL_LOCAL:
                /* return empty node mask for local allocation */
@@ -1634,7 +1633,6 @@ static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
 out_put:
        put_task_struct(task);
        goto out;
-
 }
 
 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
@@ -1644,7 +1642,6 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
        return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
 }
 
-
 /* Retrieve NUMA policy */
 static int kernel_get_mempolicy(int __user *policy,
                                unsigned long __user *nmask,
@@ -1827,10 +1824,10 @@ nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
  * policy_node() is always coupled with policy_nodemask(), which
  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
  */
-static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
+static int policy_node(gfp_t gfp, struct mempolicy *policy, int nid)
 {
        if (policy->mode == MPOL_PREFERRED) {
-               nd = first_node(policy->nodes);
+               nid = first_node(policy->nodes);
        } else {
                /*
                 * __GFP_THISNODE shouldn't even be used with the bind policy
@@ -1845,19 +1842,18 @@ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
            policy->home_node != NUMA_NO_NODE)
                return policy->home_node;
 
-       return nd;
+       return nid;
 }
 
 /* Do dynamic interleaving for a process */
-static unsigned interleave_nodes(struct mempolicy *policy)
+static unsigned int interleave_nodes(struct mempolicy *policy)
 {
-       unsigned next;
-       struct task_struct *me = current;
+       unsigned int nid;
 
-       next = next_node_in(me->il_prev, policy->nodes);
-       if (next < MAX_NUMNODES)
-               me->il_prev = next;
-       return next;
+       nid = next_node_in(current->il_prev, policy->nodes);
+       if (nid < MAX_NUMNODES)
+               current->il_prev = nid;
+       return nid;
 }
 
 /*
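
Editor's note: next_node_in(n, mask) yields the next node set in mask after n, wrapping around, and returns MAX_NUMNODES when the mask is empty; interleave_nodes() keeps its cursor in current->il_prev so successive allocations rotate across the policy's nodes. A hedged userspace model of that rotation, with a plain bitmask standing in for nodemask_t and next_node_in() reimplemented rather than the kernel's:

    #include <stdio.h>

    #define MAX_NUMNODES 8

    /* Model of next_node_in(): next set bit after 'prev', wrapping
     * around; MAX_NUMNODES when the mask is empty. */
    static unsigned int next_node_in(unsigned int prev, unsigned int mask)
    {
            for (unsigned int i = 1; i <= MAX_NUMNODES; i++) {
                    unsigned int nid = (prev + i) % MAX_NUMNODES;

                    if (mask & (1u << nid))
                            return nid;
            }
            return MAX_NUMNODES;
    }

    int main(void)
    {
            unsigned int il_prev = 0;
            unsigned int mask = 0x06;       /* interleave nodes 1 and 2 */

            for (int i = 0; i < 5; i++) {
                    unsigned int nid = next_node_in(il_prev, mask);

                    if (nid < MAX_NUMNODES)
                            il_prev = nid;  /* advance the per-task cursor */
                    printf("allocation %d -> node %u\n", i, nid);
            }
            return 0;       /* prints nodes 1, 2, 1, 2, 1 */
    }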
@@ -2347,7 +2343,7 @@ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
-       struct mempolicy *pol = mpol_dup(vma_policy(src));
+       struct mempolicy *pol = mpol_dup(src->vm_policy);
 
        if (IS_ERR(pol))
                return PTR_ERR(pol);
@@ -2771,40 +2767,40 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
        }
 }
 
-int mpol_set_shared_policy(struct shared_policy *info,
-                       struct vm_area_struct *vma, struct mempolicy *npol)
+int mpol_set_shared_policy(struct shared_policy *sp,
+                       struct vm_area_struct *vma, struct mempolicy *pol)
 {
        int err;
        struct sp_node *new = NULL;
        unsigned long sz = vma_pages(vma);
 
-       if (npol) {
-               new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
+       if (pol) {
+               new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
                if (!new)
                        return -ENOMEM;
        }
-       err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
+       err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
        if (err && new)
                sp_free(new);
        return err;
 }
 
 /* Free a backing policy store on inode delete. */
-void mpol_free_shared_policy(struct shared_policy *p)
+void mpol_free_shared_policy(struct shared_policy *sp)
 {
        struct sp_node *n;
        struct rb_node *next;
 
-       if (!p->root.rb_node)
+       if (!sp->root.rb_node)
                return;
-       write_lock(&p->lock);
-       next = rb_first(&p->root);
+       write_lock(&sp->lock);
+       next = rb_first(&sp->root);
        while (next) {
                n = rb_entry(next, struct sp_node, nd);
                next = rb_next(&n->nd);
-               sp_delete(p, n);
+               sp_delete(sp, n);
        }
-       write_unlock(&p->lock);
+       write_unlock(&sp->lock);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
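
Editor's note: one detail worth seeing in mpol_free_shared_policy() above is that rb_next(&n->nd) is fetched before sp_delete() frees n, since calling rb_next() on a freed node would be a use-after-free. A hedged userspace analogue of the same save-next-before-free idiom, on a singly linked list rather than an rbtree:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    /* Free every node: the successor is saved before free(n), exactly as
     * the loop above saves rb_next(&n->nd) before sp_delete(). */
    static void free_all(struct node *head)
    {
            struct node *n = head;

            while (n) {
                    struct node *next = n->next;    /* fetch before freeing */

                    free(n);
                    n = next;
            }
    }

    int main(void)
    {
            struct node *head = NULL;

            for (int i = 0; i < 3; i++) {
                    struct node *n = malloc(sizeof(*n));

                    n->next = head;
                    head = n;
            }
            free_all(head);
            printf("list torn down safely\n");
            return 0;
    }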
@@ -2854,7 +2850,6 @@ static inline void __init check_numabalancing_enable(void)
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-/* assumes fs == KERNEL_DS */
 void __init numa_policy_init(void)
 {
        nodemask_t interleave_nodes;
@@ -2917,7 +2912,6 @@ void numa_default_policy(void)
 /*
  * Parse and format mempolicy from/to strings
  */
-
 static const char * const policy_modes[] =
 {
        [MPOL_DEFAULT]   = "default",
@@ -2928,7 +2922,6 @@ static const char * const policy_modes[] =
        [MPOL_PREFERRED_MANY]  = "prefer (many)",
 };
 
-
 #ifdef CONFIG_TMPFS
 /**
  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
