  * to the last. It would be better if bind would truly restrict
  * the allocation to memory nodes instead
  *
- * preferred       Try a specific node first before normal fallback.
+ * preferred       Try a specific node first before normal fallback.
  *                 As a special case NUMA_NO_NODE here means do the allocation
  *                 on the local CPU. This is normally identical to default,
  *                 but useful to set in a VMA when you have a non default
@@ -52,7 +52,7 @@
  * on systems with highmem kernel lowmem allocation don't get policied.
  * Same with GFP_DMA allocations.
  *
- * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
+ * For shmem/tmpfs shared memory the policy is shared between
  * all users and remembered even when nobody has memory mapped.
  */
 
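Aside, not part of the diff: the "preferred" behaviour described in the comment above can be exercised from userspace through set_mempolicy(2); a minimal sketch, assuming libnuma's <numaif.h> (link with -lnuma) and an arbitrary choice of node 0:

#include <numaif.h>
#include <stdio.h>

int main(void)
{
        unsigned long nodemask = 1UL << 0;      /* try node 0 first, then normal fallback */

        if (set_mempolicy(MPOL_PREFERRED, &nodemask, 8 * sizeof(nodemask)) != 0)
                perror("set_mempolicy");

        /* ... allocations made here prefer node 0 ... */

        if (set_mempolicy(MPOL_DEFAULT, NULL, 0) != 0)  /* back to the default policy */
                perror("set_mempolicy");
        return 0;
}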
@@ -291,6 +291,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                         return ERR_PTR(-EINVAL);
         } else if (nodes_empty(*nodes))
                 return ERR_PTR(-EINVAL);
+
         policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
         if (!policy)
                 return ERR_PTR(-ENOMEM);
@@ -303,11 +304,11 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 }
 
 /* Slow path of a mpol destructor. */
-void __mpol_put(struct mempolicy *p)
+void __mpol_put(struct mempolicy *pol)
 {
-        if (!atomic_dec_and_test(&p->refcnt))
+        if (!atomic_dec_and_test(&pol->refcnt))
                 return;
-        kmem_cache_free(policy_cache, p);
+        kmem_cache_free(policy_cache, pol);
 }
 
 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
@@ -364,7 +365,6 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
  *
  * Called with task's alloc_lock held.
  */
-
 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 {
         mpol_rebind_policy(tsk->mempolicy, new);
@@ -375,7 +375,6 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
  *
  * Call holding a reference to mm. Takes mm->mmap_lock during call.
  */
-
 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 {
         struct vm_area_struct *vma;
@@ -757,7 +756,7 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
  * This must be called with the mmap_lock held for writing.
  */
 static int vma_replace_policy(struct vm_area_struct *vma,
-                                struct mempolicy *pol)
+                                struct mempolicy *pol)
 {
         int err;
         struct mempolicy *old;
@@ -800,7 +799,7 @@ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
                 vmstart = vma->vm_start;
         }
 
-        if (mpol_equal(vma_policy(vma), new_pol)) {
+        if (mpol_equal(vma->vm_policy, new_pol)) {
                 *prev = vma;
                 return 0;
         }
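Aside, not part of the diff: mbind_range() above is the path behind a userspace mbind(2) call; a minimal sketch of driving it on an anonymous private mapping, again assuming libnuma's <numaif.h> and an arbitrary choice of node 0:

#include <numaif.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4 << 20;
        unsigned long nodemask = 1UL << 0;      /* bind this range to node 0 */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask), 0) != 0)
                perror("mbind");        /* the VMA keeps its old policy on failure */
        return 0;
}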
@@ -855,18 +854,18 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
  *
  * Called with task's alloc_lock held
  */
-static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
+static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
 {
         nodes_clear(*nodes);
-        if (p == &default_policy)
+        if (pol == &default_policy)
                 return;
 
-        switch (p->mode) {
+        switch (pol->mode) {
         case MPOL_BIND:
         case MPOL_INTERLEAVE:
         case MPOL_PREFERRED:
         case MPOL_PREFERRED_MANY:
-                *nodes = p->nodes;
+                *nodes = pol->nodes;
                 break;
         case MPOL_LOCAL:
                 /* return empty node mask for local allocation */
@@ -1634,7 +1633,6 @@ static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
 out_put:
         put_task_struct(task);
         goto out;
-
 }
 
 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
@@ -1644,7 +1642,6 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
         return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
 }
 
-
 /* Retrieve NUMA policy */
 static int kernel_get_mempolicy(int __user *policy,
                                 unsigned long __user *nmask,
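Aside, not part of the diff: kernel_get_mempolicy() backs the get_mempolicy(2) syscall; a minimal sketch that reads back the calling thread's policy, assuming libnuma's <numaif.h> and at most 64 nodes so a single unsigned long can hold the mask:

#include <numaif.h>
#include <stdio.h>

int main(void)
{
        int mode;
        unsigned long nodemask = 0;

        if (get_mempolicy(&mode, &nodemask, 8 * sizeof(nodemask), NULL, 0) != 0) {
                perror("get_mempolicy");
                return 1;
        }
        printf("mode=%d nodemask=0x%lx\n", mode, nodemask);
        return 0;
}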
@@ -1827,10 +1824,10 @@ nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
  * policy_node() is always coupled with policy_nodemask(), which
  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
  */
-static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
+static int policy_node(gfp_t gfp, struct mempolicy *policy, int nid)
 {
         if (policy->mode == MPOL_PREFERRED) {
-                nd = first_node(policy->nodes);
+                nid = first_node(policy->nodes);
         } else {
                 /*
                  * __GFP_THISNODE shouldn't even be used with the bind policy
@@ -1845,19 +1842,18 @@ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
             policy->home_node != NUMA_NO_NODE)
                 return policy->home_node;
 
-        return nd;
+        return nid;
 }
 
 /* Do dynamic interleaving for a process */
-static unsigned interleave_nodes(struct mempolicy *policy)
+static unsigned int interleave_nodes(struct mempolicy *policy)
 {
-        unsigned next;
-        struct task_struct *me = current;
+        unsigned int nid;
 
-        next = next_node_in(me->il_prev, policy->nodes);
-        if (next < MAX_NUMNODES)
-                me->il_prev = next;
-        return next;
+        nid = next_node_in(current->il_prev, policy->nodes);
+        if (nid < MAX_NUMNODES)
+                current->il_prev = nid;
+        return nid;
 }
 
 /*
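Aside, not part of the diff: a standalone illustration of the round-robin walk that interleave_nodes() performs; next_node_in_mask() is a made-up stand-in for the kernel's next_node_in(), and the node set {0, 2} is arbitrary:

#include <stdio.h>

/* Stand-in for next_node_in(): first node set in mask after prev, wrapping around. */
static int next_node_in_mask(int prev, unsigned long mask, int max)
{
        for (int i = 1; i <= max; i++) {
                int nid = (prev + i) % max;

                if (mask & (1UL << nid))
                        return nid;
        }
        return max;     /* plays the role of MAX_NUMNODES: no node set */
}

int main(void)
{
        unsigned long allowed = (1UL << 0) | (1UL << 2);        /* interleave over nodes {0, 2} */
        int il_prev = 0;        /* mirrors task_struct::il_prev */
        int max = 4;

        for (int i = 0; i < 6; i++) {
                int nid = next_node_in_mask(il_prev, allowed, max);

                if (nid < max)
                        il_prev = nid;  /* remember where this allocation landed */
                printf("allocation %d -> node %d\n", i, nid);   /* prints 2, 0, 2, 0, 2, 0 */
        }
        return 0;
}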
@@ -2347,7 +2343,7 @@ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
-        struct mempolicy *pol = mpol_dup(vma_policy(src));
+        struct mempolicy *pol = mpol_dup(src->vm_policy);
 
         if (IS_ERR(pol))
                 return PTR_ERR(pol);
@@ -2771,40 +2767,40 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
         }
 }
 
-int mpol_set_shared_policy(struct shared_policy *info,
-                        struct vm_area_struct *vma, struct mempolicy *npol)
+int mpol_set_shared_policy(struct shared_policy *sp,
+                        struct vm_area_struct *vma, struct mempolicy *pol)
 {
         int err;
         struct sp_node *new = NULL;
         unsigned long sz = vma_pages(vma);
 
-        if (npol) {
-                new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
+        if (pol) {
+                new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
                 if (!new)
                         return -ENOMEM;
         }
-        err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff + sz, new);
+        err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
         if (err && new)
                 sp_free(new);
         return err;
 }
 
 /* Free a backing policy store on inode delete. */
-void mpol_free_shared_policy(struct shared_policy *p)
+void mpol_free_shared_policy(struct shared_policy *sp)
 {
         struct sp_node *n;
         struct rb_node *next;
 
-        if (!p->root.rb_node)
+        if (!sp->root.rb_node)
                 return;
-        write_lock(&p->lock);
-        next = rb_first(&p->root);
+        write_lock(&sp->lock);
+        next = rb_first(&sp->root);
         while (next) {
                 n = rb_entry(next, struct sp_node, nd);
                 next = rb_next(&n->nd);
-                sp_delete(p, n);
+                sp_delete(sp, n);
         }
-        write_unlock(&p->lock);
+        write_unlock(&sp->lock);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
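Aside, not part of the diff: mpol_set_shared_policy() is reached when mbind(2) is applied to a MAP_SHARED shmem/tmpfs mapping, which is why the policy lives in the inode's shared_policy tree and, as the header comment says, is remembered even when nobody has the memory mapped. A minimal sketch, assuming libnuma's <numaif.h>, memfd_create(2) and a single-node mask, with most error handling omitted:

#define _GNU_SOURCE
#include <numaif.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 2 << 20;
        unsigned long nodemask = 1UL << 0;
        int fd = memfd_create("shared-policy-demo", 0);

        if (fd < 0 || ftruncate(fd, len) != 0)
                return 1;

        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        /* Stored against the shmem inode, not just against this one VMA. */
        mbind(p, len, MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask), 0);

        munmap(p, len);         /* the policy outlives this mapping */
        close(fd);
        return 0;
}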
@@ -2854,7 +2850,6 @@ static inline void __init check_numabalancing_enable(void)
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-/* assumes fs == KERNEL_DS */
 void __init numa_policy_init(void)
 {
         nodemask_t interleave_nodes;
@@ -2917,7 +2912,6 @@ void numa_default_policy(void)
 /*
  * Parse and format mempolicy from/to strings
  */
-
 static const char * const policy_modes[] =
 {
         [MPOL_DEFAULT] = "default",
@@ -2928,7 +2922,6 @@ static const char * const policy_modes[] =
         [MPOL_PREFERRED_MANY] = "prefer (many)",
 };
 
-
 #ifdef CONFIG_TMPFS
 /**
  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.