Skip to content

Commit f1157db

Browse files
GONG Ruiqi authored and tehcaster committed
slab: Adjust placement of __kvmalloc_node_noprof
Move __kvmalloc_node_noprof (as well as kvfree*, kvrealloc_noprof and kmalloc_gfp_adjust for consistency) into mm/slub.c so that it can directly invoke __do_kmalloc_node, which is needed for the next patch. No functional changes intended. Signed-off-by: GONG Ruiqi <[email protected]> Reviewed-by: Hyeonggon Yoo <[email protected]> Tested-by: Hyeonggon Yoo <[email protected]> Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 12f4888 commit f1157db

File tree

2 files changed

+162
-162
lines changed

2 files changed

+162
-162
lines changed

mm/slub.c

Lines changed: 162 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4878,6 +4878,168 @@ void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
48784878
}
48794879
EXPORT_SYMBOL(krealloc_noprof);
48804880

4881+
/*
 * Tune the gfp mask for a kmalloc attempt that has a vmalloc fallback.
 * Requests at or below PAGE_SIZE are left untouched.
 */
static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
{
	/* Sub-page requests never fall back to vmalloc; keep flags as-is. */
	if (size <= PAGE_SIZE)
		return flags;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	flags |= __GFP_NOWARN;

	if (!(flags & __GFP_RETRY_MAYFAIL))
		flags |= __GFP_NORETRY;

	/* nofail semantic is implemented by the vmalloc fallback */
	flags &= ~__GFP_NOFAIL;

	return flags;
}
4902+
4903+
/**
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @b: which set of kmalloc buckets to allocate from.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
	void *ret;

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
				    kmalloc_gfp_adjust(flags, size),
				    node);
	/* kmalloc succeeded, or the request is too small for the fallback. */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		/* Warn unless the caller opted out of failure warnings. */
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);
4955+
4956+
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	/* Dispatch on the address origin: vmalloc range vs. slab. */
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}
EXPORT_SYMBOL(kvfree);
4974+
4975+
/**
4976+
* kvfree_sensitive - Free a data object containing sensitive information.
4977+
* @addr: address of the data object to be freed.
4978+
* @len: length of the data object.
4979+
*
4980+
* Use the special memzero_explicit() function to clear the content of a
4981+
* kvmalloc'ed object containing sensitive data to make sure that the
4982+
* compiler won't optimize out the data clearing.
4983+
*/
4984+
void kvfree_sensitive(const void *addr, size_t len)
4985+
{
4986+
if (likely(!ZERO_OR_NULL_PTR(addr))) {
4987+
memzero_explicit((void *)addr, len);
4988+
kvfree(addr);
4989+
}
4990+
}
4991+
EXPORT_SYMBOL(kvfree_sensitive);
4992+
4993+
/**
 * kvrealloc - reallocate memory; contents remain unchanged
 * @p: object to reallocate memory for
 * @size: the size to reallocate
 * @flags: the flags for the page level allocator
 *
 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
 * and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * This function must not be called concurrently with itself or kvfree() for the
 * same memory allocation.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
	void *n;

	/* vmalloc'ed objects are resized entirely by the vmalloc side. */
	if (is_vmalloc_addr(p))
		return vrealloc_noprof(p, size, flags);

	/* Try slab first, with the same flag tuning kvmalloc() uses. */
	n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
	if (!n) {
		/* We failed to krealloc(), fall back to kvmalloc(). */
		n = kvmalloc_noprof(size, flags);
		if (!n)
			return NULL;

		if (p) {
			/* We already know that `p` is not a vmalloc address. */
			/*
			 * Copy the old contents manually, bounded by the old
			 * object's allocated size (ksize), since the fallback
			 * allocation is not a true realloc of `p`.
			 */
			kasan_disable_current();
			memcpy(n, kasan_reset_tag(p), ksize(p));
			kasan_enable_current();

			kfree(p);
		}
	}

	return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);
5042+
48815043
struct detached_freelist {
48825044
struct slab *slab;
48835045
void *tail;

mm/util.c

Lines changed: 0 additions & 162 deletions
Original file line numberDiff line numberDiff line change
@@ -612,168 +612,6 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
612612
}
613613
EXPORT_SYMBOL(vm_mmap);
614614

615-
static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
616-
{
617-
/*
618-
* We want to attempt a large physically contiguous block first because
619-
* it is less likely to fragment multiple larger blocks and therefore
620-
* contribute to a long term fragmentation less than vmalloc fallback.
621-
* However make sure that larger requests are not too disruptive - no
622-
* OOM killer and no allocation failure warnings as we have a fallback.
623-
*/
624-
if (size > PAGE_SIZE) {
625-
flags |= __GFP_NOWARN;
626-
627-
if (!(flags & __GFP_RETRY_MAYFAIL))
628-
flags |= __GFP_NORETRY;
629-
630-
/* nofail semantic is implemented by the vmalloc fallback */
631-
flags &= ~__GFP_NOFAIL;
632-
}
633-
634-
return flags;
635-
}
636-
637-
/**
638-
* __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
639-
* failure, fall back to non-contiguous (vmalloc) allocation.
640-
* @size: size of the request.
641-
* @b: which set of kmalloc buckets to allocate from.
642-
* @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
643-
* @node: numa node to allocate from
644-
*
645-
* Uses kmalloc to get the memory but if the allocation fails then falls back
646-
* to the vmalloc allocator. Use kvfree for freeing the memory.
647-
*
648-
* GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
649-
* __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
650-
* preferable to the vmalloc fallback, due to visible performance drawbacks.
651-
*
652-
* Return: pointer to the allocated memory of %NULL in case of failure
653-
*/
654-
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
655-
{
656-
void *ret;
657-
658-
/*
659-
* It doesn't really make sense to fallback to vmalloc for sub page
660-
* requests
661-
*/
662-
ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
663-
kmalloc_gfp_adjust(flags, size),
664-
node);
665-
if (ret || size <= PAGE_SIZE)
666-
return ret;
667-
668-
/* non-sleeping allocations are not supported by vmalloc */
669-
if (!gfpflags_allow_blocking(flags))
670-
return NULL;
671-
672-
/* Don't even allow crazy sizes */
673-
if (unlikely(size > INT_MAX)) {
674-
WARN_ON_ONCE(!(flags & __GFP_NOWARN));
675-
return NULL;
676-
}
677-
678-
/*
679-
* kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
680-
* since the callers already cannot assume anything
681-
* about the resulting pointer, and cannot play
682-
* protection games.
683-
*/
684-
return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
685-
flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
686-
node, __builtin_return_address(0));
687-
}
688-
EXPORT_SYMBOL(__kvmalloc_node_noprof);
689-
690-
/**
691-
* kvfree() - Free memory.
692-
* @addr: Pointer to allocated memory.
693-
*
694-
* kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
695-
* It is slightly more efficient to use kfree() or vfree() if you are certain
696-
* that you know which one to use.
697-
*
698-
* Context: Either preemptible task context or not-NMI interrupt.
699-
*/
700-
void kvfree(const void *addr)
701-
{
702-
if (is_vmalloc_addr(addr))
703-
vfree(addr);
704-
else
705-
kfree(addr);
706-
}
707-
EXPORT_SYMBOL(kvfree);
708-
709-
/**
710-
* kvfree_sensitive - Free a data object containing sensitive information.
711-
* @addr: address of the data object to be freed.
712-
* @len: length of the data object.
713-
*
714-
* Use the special memzero_explicit() function to clear the content of a
715-
* kvmalloc'ed object containing sensitive data to make sure that the
716-
* compiler won't optimize out the data clearing.
717-
*/
718-
void kvfree_sensitive(const void *addr, size_t len)
719-
{
720-
if (likely(!ZERO_OR_NULL_PTR(addr))) {
721-
memzero_explicit((void *)addr, len);
722-
kvfree(addr);
723-
}
724-
}
725-
EXPORT_SYMBOL(kvfree_sensitive);
726-
727-
/**
728-
* kvrealloc - reallocate memory; contents remain unchanged
729-
* @p: object to reallocate memory for
730-
* @size: the size to reallocate
731-
* @flags: the flags for the page level allocator
732-
*
733-
* If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
734-
* and @p is not a %NULL pointer, the object pointed to is freed.
735-
*
736-
* If __GFP_ZERO logic is requested, callers must ensure that, starting with the
737-
* initial memory allocation, every subsequent call to this API for the same
738-
* memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
739-
* __GFP_ZERO is not fully honored by this API.
740-
*
741-
* In any case, the contents of the object pointed to are preserved up to the
742-
* lesser of the new and old sizes.
743-
*
744-
* This function must not be called concurrently with itself or kvfree() for the
745-
* same memory allocation.
746-
*
747-
* Return: pointer to the allocated memory or %NULL in case of error
748-
*/
749-
void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
750-
{
751-
void *n;
752-
753-
if (is_vmalloc_addr(p))
754-
return vrealloc_noprof(p, size, flags);
755-
756-
n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
757-
if (!n) {
758-
/* We failed to krealloc(), fall back to kvmalloc(). */
759-
n = kvmalloc_noprof(size, flags);
760-
if (!n)
761-
return NULL;
762-
763-
if (p) {
764-
/* We already know that `p` is not a vmalloc address. */
765-
kasan_disable_current();
766-
memcpy(n, kasan_reset_tag(p), ksize(p));
767-
kasan_enable_current();
768-
769-
kfree(p);
770-
}
771-
}
772-
773-
return n;
774-
}
775-
EXPORT_SYMBOL(kvrealloc_noprof);
776-
777615
/**
778616
* __vmalloc_array - allocate memory for a virtually contiguous array.
779617
* @n: number of elements.

0 commit comments

Comments
 (0)