Skip to content

Commit bb3ca38

Browse files
mhklinux authored and liuw committed
hv_balloon: Use kernel macros to simplify open coded sequences
Code sequences equivalent to ALIGN(), ALIGN_DOWN(), and umin() are currently open coded. Change these to use the kernel macro to improve code clarity. ALIGN() and ALIGN_DOWN() require the alignment value to be a power of 2, which is the case here. Reviewed-by: David Hildenbrand <[email protected]> Signed-off-by: Michael Kelley <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Wei Liu <[email protected]> Message-ID: <[email protected]>
1 parent 1613e60 commit bb3ca38

File tree

1 file changed

+8
-32
lines changed

1 file changed

+8
-32
lines changed

drivers/hv/hv_balloon.c

Lines changed: 8 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -729,15 +729,8 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
729729

730730
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
731731
has->ha_end_pfn += HA_CHUNK;
732-
733-
if (total_pfn > HA_CHUNK) {
734-
processed_pfn = HA_CHUNK;
735-
total_pfn -= HA_CHUNK;
736-
} else {
737-
processed_pfn = total_pfn;
738-
total_pfn = 0;
739-
}
740-
732+
processed_pfn = umin(total_pfn, HA_CHUNK);
733+
total_pfn -= processed_pfn;
741734
has->covered_end_pfn += processed_pfn;
742735
}
743736

@@ -800,7 +793,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
800793
{
801794
struct hv_hotadd_state *has;
802795
struct hv_hotadd_gap *gap;
803-
unsigned long residual, new_inc;
796+
unsigned long residual;
804797
int ret = 0;
805798

806799
guard(spinlock_irqsave)(&dm_device.ha_lock);
@@ -836,15 +829,9 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
836829
* our current limit; extend it.
837830
*/
838831
if ((start_pfn + pfn_cnt) > has->end_pfn) {
832+
/* Extend the region by multiples of HA_CHUNK */
839833
residual = (start_pfn + pfn_cnt - has->end_pfn);
840-
/*
841-
* Extend the region by multiples of HA_CHUNK.
842-
*/
843-
new_inc = (residual / HA_CHUNK) * HA_CHUNK;
844-
if (residual % HA_CHUNK)
845-
new_inc += HA_CHUNK;
846-
847-
has->end_pfn += new_inc;
834+
has->end_pfn += ALIGN(residual, HA_CHUNK);
848835
}
849836

850837
ret = 1;
@@ -915,9 +902,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
915902
*/
916903
size = (has->end_pfn - has->ha_end_pfn);
917904
if (pfn_cnt <= size) {
918-
size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
919-
if (pfn_cnt % HA_CHUNK)
920-
size += HA_CHUNK;
905+
size = ALIGN(pfn_cnt, HA_CHUNK);
921906
} else {
922907
pfn_cnt = size;
923908
}
@@ -1011,24 +996,15 @@ static void hot_add_req(struct work_struct *dummy)
1011996
rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
1012997

1013998
if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
1014-
unsigned long region_size;
1015-
unsigned long region_start;
1016-
1017999
/*
10181000
* The host has not specified the hot-add region.
10191001
* Based on the hot-add page range being specified,
10201002
* compute a hot-add region that can cover the pages
10211003
* that need to be hot-added while ensuring the alignment
10221004
* and size requirements of Linux as it relates to hot-add.
10231005
*/
1024-
region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
1025-
if (pfn_cnt % HA_CHUNK)
1026-
region_size += HA_CHUNK;
1027-
1028-
region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
1029-
1030-
rg_start = region_start;
1031-
rg_sz = region_size;
1006+
rg_start = ALIGN_DOWN(pg_start, HA_CHUNK);
1007+
rg_sz = ALIGN(pfn_cnt, HA_CHUNK);
10321008
}
10331009

10341010
if (do_hot_add)

0 commit comments

Comments
 (0)