@@ -781,7 +781,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	int slab_node = slab_nid(virt_to_slab(objp));
 	int node = numa_mem_id();
 	/*
-	 * Make sure we are not freeing a object from another node to the array
+	 * Make sure we are not freeing an object from another node to the array
 	 * cache on this cpu.
 	 */
 	if (likely(node == slab_node))
@@ -832,7 +832,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 
 		/*
 		 * The kmem_cache_nodes don't come and go as CPUs
-		 * come and go. slab_mutex is sufficient
+		 * come and go. slab_mutex provides sufficient
 		 * protection here.
 		 */
 		cachep->node[node] = n;
@@ -845,7 +845,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
  * will be allocated off-node since memory is not yet online for the new node.
- * When hotplugging memory or a cpu, existing node are not replaced if
+ * When hotplugging memory or a cpu, existing nodes are not replaced if
  * already in use.
  *
  * Must hold slab_mutex.
@@ -1046,7 +1046,7 @@ int slab_prepare_cpu(unsigned int cpu)
  * offline.
  *
  * Even if all the cpus of a node are down, we don't free the
- * kmem_cache_node of any cache. This to avoid a race between cpu_down, and
+ * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
  * a kmalloc allocation from another cpu for memory from the node of
  * the cpu going down. The kmem_cache_node structure is usually allocated from
  * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
@@ -1890,7 +1890,7 @@ static bool set_on_slab_cache(struct kmem_cache *cachep,
  * @flags: SLAB flags
  *
  * Returns a ptr to the cache on success, NULL on failure.
- * Cannot be called within a int, but can be interrupted.
+ * Cannot be called within an int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
  * The flags are
@@ -3138,7 +3138,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 }
 
 /*
- * A interface to enable slab creation on nodeid
+ * An interface to enable slab creation on nodeid
 */
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
				  int nodeid)
0 commit comments