From ad1d01aece8a36cc324d0dadb91bd508d44a8e98 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:02 +0000 Subject: [PATCH 01/12] bpf: Verifier, remove some unusual uses of min_t() and max_t() min_t() and max_t() are normally used to change the signedness of a positive value to avoid a signed-v-unsigned compare warning. However they are used here to convert an unsigned 64bit pattern to a 32/64bit signed number. To avoid any confusion use plain min()/max() and explicitly cast the u64 expression to the correct signed value. Use a simple max() for the max_pkt_offset calculation and delete the comment about why the cast to u32 is safe. Signed-off-by: David Laight --- kernel/bpf/verifier.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 098dd7f21c89..82faeff351fe 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2346,12 +2346,12 @@ static void __update_reg32_bounds(struct bpf_reg_state *reg) struct tnum var32_off = tnum_subreg(reg->var_off); /* min signed is max(sign bit) | min(other bits) */ - reg->s32_min_value = max_t(s32, reg->s32_min_value, - var32_off.value | (var32_off.mask & S32_MIN)); + reg->s32_min_value = max(reg->s32_min_value, + (s32)(var32_off.value | (var32_off.mask & S32_MIN))); /* max signed is min(sign bit) | max(other bits) */ - reg->s32_max_value = min_t(s32, reg->s32_max_value, - var32_off.value | (var32_off.mask & S32_MAX)); - reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); + reg->s32_max_value = min(reg->s32_max_value, + (s32)(var32_off.value | (var32_off.mask & S32_MAX))); + reg->u32_min_value = max(reg->u32_min_value, (u32)var32_off.value); reg->u32_max_value = min(reg->u32_max_value, (u32)(var32_off.value | var32_off.mask)); } @@ -2359,11 +2359,11 @@ static void __update_reg32_bounds(struct bpf_reg_state *reg) static void __update_reg64_bounds(struct bpf_reg_state 
*reg) { /* min signed is max(sign bit) | min(other bits) */ - reg->smin_value = max_t(s64, reg->smin_value, - reg->var_off.value | (reg->var_off.mask & S64_MIN)); + reg->smin_value = max(reg->smin_value, + (s64)(reg->var_off.value | (reg->var_off.mask & S64_MIN))); /* max signed is min(sign bit) | max(other bits) */ - reg->smax_value = min_t(s64, reg->smax_value, - reg->var_off.value | (reg->var_off.mask & S64_MAX)); + reg->smax_value = min(reg->smax_value, + (s64)(reg->var_off.value | (reg->var_off.mask & S64_MAX))); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); @@ -6185,15 +6185,8 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, return err; } - /* __check_mem_access has made sure "off + size - 1" is within u16. - * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, - * otherwise find_good_pkt_pointers would have refused to set range info - * that __check_mem_access would have rejected this pkt access. - * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. - */ - env->prog->aux->max_pkt_offset = - max_t(u32, env->prog->aux->max_pkt_offset, - off + reg->umax_value + size - 1); + env->prog->aux->max_pkt_offset = max(env->prog->aux->max_pkt_offset, + off + reg->umax_value + size - 1); return err; } From 9c03117abfd54a45c3535817f775a03ff9d32f9d Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:03 +0000 Subject: [PATCH 02/12] net/core/flow_dissector: Fix cap of __skb_flow_dissect() return value. There are some dodgy clamp_t(u16, ...) and min_t(u16, ...). __skb_flow_dissect() tries to cap its return value with: key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); however this casts skb->len to u16 before the comparison. While both nhoff and hlen are 'small', skb->len could be 0x10001 which gets converted to 1 by the cast. 
This gives an invalid (small) value for thoff for valid packets. bpf_flow_dissect() used clamp_t(u16, ...) to set both flow_keys->nhoff and flow_keys->thoff. While I think these can't lose significant bits the casts are unnecessary; plain clamp(...) works fine. Fixes: d0c081b49137c ("flow_dissector: properly cap thoff field") Signed-off-by: David Laight --- net/core/flow_dissector.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 1b61bb25ba0e..e362160bb73d 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1023,9 +1023,8 @@ u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, result = bpf_prog_run_pin_on_cpu(prog, ctx); - flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen); - flow_keys->thoff = clamp_t(u16, flow_keys->thoff, - flow_keys->nhoff, hlen); + flow_keys->nhoff = clamp(flow_keys->nhoff, nhoff, hlen); + flow_keys->thoff = clamp(flow_keys->thoff, flow_keys->nhoff, hlen); return result; } @@ -1687,7 +1686,7 @@ bool __skb_flow_dissect(const struct net *net, ret = true; out: - key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); + key_control->thoff = umin(nhoff, skb ? skb->len : hlen); key_basic->n_proto = proto; key_basic->ip_proto = ip_proto; From 3a6cda0bfdf5177807e71cc2cf823e868a84fb12 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:04 +0000 Subject: [PATCH 03/12] net: ethtool: Use min3() instead of nested min_t(u16,...) In ethtool_cmis_cdb_execute_epl_cmd() change space_left and bytes_to_write from u16 to u32. Although the values may fit in 16 bits, 32bit variables will generate better code. Replace the nested min_t(u16, bytes_left, min_t(u16, space_left, x)) with a call to min3(). 
Signed-off-by: David Laight --- net/ethtool/cmis_cdb.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c index 3057576bc81e..1406205e047e 100644 --- a/net/ethtool/cmis_cdb.c +++ b/net/ethtool/cmis_cdb.c @@ -573,12 +573,11 @@ ethtool_cmis_cdb_execute_epl_cmd(struct net_device *dev, while (offset <= CMIS_CDB_EPL_FW_BLOCK_OFFSET_END && bytes_written < epl_len) { u32 bytes_left = epl_len - bytes_written; - u16 space_left, bytes_to_write; + u32 space_left, bytes_to_write; space_left = CMIS_CDB_EPL_FW_BLOCK_OFFSET_END - offset + 1; - bytes_to_write = min_t(u16, bytes_left, - min_t(u16, space_left, - args->read_write_len_ext)); + bytes_to_write = min3(bytes_left, space_left, + args->read_write_len_ext); err = __ethtool_cmis_cdb_execute_cmd(dev, page_data, page, offset, From c8d3c7c5394adf6b720974b7b051b0495313e824 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:05 +0000 Subject: [PATCH 04/12] ipv6: __ip6_append_data() don't abuse max_t() casts The implicit casts done by max_t() should only really be used to convert positive values to signed or unsigned types. In the EMSGSIZE error path pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0); is being used to convert a large unsigned value to a signed negative one. Rework using a signed temporary variable and max(pmtu, 0), as well as casting sizeof() to (int) - which is where the unsignedness comes from. 
Signed-off-by: David Laight --- net/ipv6/ip6_output.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f904739e99b9..6fecf2f2cc9a 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1440,7 +1440,7 @@ static int __ip6_append_data(struct sock *sk, struct sk_buff *skb, *skb_prev = NULL; struct inet_cork *cork = &cork_full->base; struct flowi6 *fl6 = &cork_full->fl.u.ip6; - unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu; + unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; struct ubuf_info *uarg = NULL; int exthdrlen = 0; int dst_exthdrlen = 0; @@ -1504,9 +1504,10 @@ static int __ip6_append_data(struct sock *sk, maxnonfragsize = mtu; if (cork->length + length > maxnonfragsize - headersize) { + int pmtu; emsgsize: - pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0); - ipv6_local_error(sk, EMSGSIZE, fl6, pmtu); + pmtu = mtu - headersize + (int)sizeof(struct ipv6hdr); + ipv6_local_error(sk, EMSGSIZE, fl6, max(pmtu, 0)); return -EMSGSIZE; } From 03969f9f13ca1dd9712124a772543ff7b5feb411 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:17 +0000 Subject: [PATCH 05/12] drivers/net/ethernet/realtek: use min() instead of min_t() min_t(unsigned int, a, b) casts an 'unsigned long' to 'unsigned int'. Use min(a, b) instead as it promotes any 'unsigned int' to 'unsigned long' and so cannot discard significant bits. In this case the 'unsigned long' value is small enough that the result is ok. Detected by an extra check added to min_t(). 
Signed-off-by: David Laight --- drivers/net/ethernet/realtek/r8169_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index d18734fe12e4..3e636983df4a 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4238,8 +4238,7 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, } if (trans_data_len < sizeof(struct udphdr)) - padto = max_t(unsigned int, padto, - len + sizeof(struct udphdr) - trans_data_len); + padto = max(padto, len + sizeof(struct udphdr) - trans_data_len); } return padto; From 914157125f45ece65ef7b3e369880b136f5516c1 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:30 +0000 Subject: [PATCH 06/12] bpf: use min() instead of min_t() min_t(unsigned int, a, b) casts an 'unsigned long' to 'unsigned int'. Use min(a, b) instead as it promotes any 'unsigned int' to 'unsigned long' and so cannot discard significant bits. In this case the 'unsigned long' value is small enough that the result is ok. Detected by an extra check added to min_t(). 
Signed-off-by: David Laight --- kernel/bpf/core.c | 4 ++-- kernel/bpf/log.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ef4448f18aad..cc2012c25fb8 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1081,7 +1081,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, bpf_fill_ill_insns(hdr, size); hdr->size = size; - hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), + hole = min(size - (proglen + sizeof(*hdr)), PAGE_SIZE - sizeof(*hdr)); start = get_random_u32_below(hole) & ~(alignment - 1); @@ -1142,7 +1142,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr, bpf_fill_ill_insns(*rw_header, size); (*rw_header)->size = size; - hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)), + hole = min(size - (proglen + sizeof(*ro_header)), BPF_PROG_CHUNK_SIZE - sizeof(*ro_header)); start = get_random_u32_below(hole) & ~(alignment - 1); diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c index a0c3b35de2ce..5d2d7d6e71e5 100644 --- a/kernel/bpf/log.c +++ b/kernel/bpf/log.c @@ -79,7 +79,7 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, /* check if we have at least something to put into user buf */ new_n = 0; if (log->end_pos < log->len_total) { - new_n = min_t(u32, log->len_total - log->end_pos, n); + new_n = min(log->len_total - log->end_pos, n); log->kbuf[new_n - 1] = '\0'; } From cbed533aa96347d6b071de28720376b49ac92a3b Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:31 +0000 Subject: [PATCH 07/12] bpf: use min() instead of min_t() min_t(unsigned int, a, b) casts an 'unsigned long' to 'unsigned int'. Use min(a, b) instead as it promotes any 'unsigned int' to 'unsigned long' and so cannot discard significant bits. In this case the 'unsigned long' value is small enough that the result is ok. Detected by an extra check added to min_t(). 
Signed-off-by: David Laight --- kernel/trace/bpf_trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index a795f7afbf3d..6869d186b30b 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1515,7 +1515,7 @@ BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, if (!buf || (size % br_entry_size != 0)) return -EINVAL; - to_copy = min_t(u32, br_stack->nr * br_entry_size, size); + to_copy = min(br_stack->nr * br_entry_size, size); memcpy(buf, br_stack->entries, to_copy); return to_copy; From 54c5a7a4b80c3bf05c146f56e475b634513aba60 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:36 +0000 Subject: [PATCH 08/12] net: Don't pass bitfields to max_t() It is invalid to use sizeof() or typeof() in bitfields which stops them being passed to max(). This has been fixed by using max_t(). I want to add some checks to max_t() to detect cases where the cast discards non-zero high bits - which uses sizeof(). So add 0 to the bitfield (converting it to int) then use max(). 
Signed-off-by: David Laight --- include/net/tcp_ecn.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h index f13e5cd2b1ac..14c00404a95f 100644 --- a/include/net/tcp_ecn.h +++ b/include/net/tcp_ecn.h @@ -196,7 +196,7 @@ static inline void tcp_accecn_opt_demand_min(struct sock *sk, struct tcp_sock *tp = tcp_sk(sk); u8 opt_demand; - opt_demand = max_t(u8, opt_demand_min, tp->accecn_opt_demand); + opt_demand = max(opt_demand_min, tp->accecn_opt_demand + 0); tp->accecn_opt_demand = opt_demand; } @@ -303,8 +303,7 @@ static inline void tcp_ecn_received_counters(struct sock *sk, u32 bytes_mask = GENMASK_U32(31, 22); tp->received_ecn_bytes[ecnfield - 1] += len; - tp->accecn_minlen = max_t(u8, tp->accecn_minlen, - minlen); + tp->accecn_minlen = max(tp->accecn_minlen + 0, minlen); /* Send AccECN option at least once per 2^22-byte * increase in any ECN byte counter. From 4c10477ee74c60d4fd80ca92286031413e551ce0 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:37 +0000 Subject: [PATCH 09/12] net/core: Change loop conditions so min() can be used Loops like: int copied = ...; ... while (copied) { use = min_t(type, copied, PAGE_SIZE - offset); ... copied -= use; } can be converted to a plain min() if the comparison is changed to: while (copied > 0) { This removes any chance of high bits being discarded by min_t(). (In the case above PAGE_SIZE is 64bits so the 'int' cast is safe, but there are plenty of cases where the check shows up bugs.) 
Signed-off-by: David Laight --- net/core/datagram.c | 6 +++--- net/core/skmsg.c | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/net/core/datagram.c b/net/core/datagram.c index c285c6465923..555f38b89729 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -664,8 +664,8 @@ int zerocopy_fill_skb_from_iter(struct sk_buff *skb, head = compound_head(pages[n]); order = compound_order(head); - for (refs = 0; copied != 0; start = 0) { - int size = min_t(int, copied, PAGE_SIZE - start); + for (refs = 0; copied > 0; start = 0) { + int size = min(copied, PAGE_SIZE - start); if (pages[n] - head > (1UL << order) - 1) { head = compound_head(pages[n]); @@ -783,7 +783,7 @@ EXPORT_SYMBOL(__zerocopy_sg_from_iter); */ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from) { - int copy = min_t(int, skb_headlen(skb), iov_iter_count(from)); + int copy = min(skb_headlen(skb), iov_iter_count(from)); /* copy up to skb headlen */ if (skb_copy_datagram_from_iter(skb, 0, from, copy)) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 2ac7731e1e0a..b58e319f4e2e 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -335,8 +335,8 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, bytes -= copied; msg->sg.size += copied; - while (copied) { - use = min_t(int, copied, PAGE_SIZE - offset); + while (copied > 0) { + use = min(copied, PAGE_SIZE - offset); sg_set_page(&msg->sg.data[msg->sg.end], pages[i], use, offset); sg_unmark_end(&msg->sg.data[msg->sg.end]); From f57a33b0ec038dfceb68d1feffa91b0b31220e53 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:38 +0000 Subject: [PATCH 10/12] net: use min() instead of min_t() min_t(unsigned int, a, b) casts an 'unsigned long' to 'unsigned int'. Use min(a, b) instead as it promotes any 'unsigned int' to 'unsigned long' and so cannot discard significant bits. In this case the 'unsigned long' value is small enough that the result is ok. 
Detected by an extra check added to min_t(). Signed-off-by: David Laight --- net/core/net-sysfs.c | 3 +-- net/ipv4/fib_trie.c | 2 +- net/ipv4/tcp_input.c | 4 ++-- net/ipv4/tcp_output.c | 5 ++--- net/ipv4/tcp_timer.c | 4 ++-- net/ipv6/addrconf.c | 8 ++++---- net/ipv6/ndisc.c | 5 ++--- net/packet/af_packet.c | 2 +- net/unix/af_unix.c | 4 ++-- 9 files changed, 17 insertions(+), 20 deletions(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index ca878525ad7c..8aaeed38be0b 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -985,8 +985,7 @@ static int netdev_rx_queue_set_rps_mask(struct netdev_rx_queue *queue, struct rps_map *old_map, *map; int cpu, i; - map = kzalloc(max_t(unsigned int, - RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), + map = kzalloc(max(RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL); if (!map) return -ENOMEM; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 59a6f0a9638f..e85441717222 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -710,7 +710,7 @@ static unsigned char update_suffix(struct key_vector *tn) * tn->pos + tn->bits, the second highest node will have a suffix * length at most of tn->pos + tn->bits - 1 */ - slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen); + slen_max = min(tn->pos + tn->bits - 1, tn->slen); /* search though the list of children looking for nodes that might * have a suffix greater than the one we currently have. 
This is diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e4a979b75cc6..8c9eb91190ae 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2870,7 +2870,7 @@ static void tcp_mtup_probe_success(struct sock *sk) val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache); do_div(val, icsk->icsk_mtup.probe_size); DEBUG_NET_WARN_ON_ONCE((u32)val != val); - tcp_snd_cwnd_set(tp, max_t(u32, 1U, val)); + tcp_snd_cwnd_set(tp, max(1, val)); tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_jiffies32; @@ -3323,7 +3323,7 @@ void tcp_rearm_rto(struct sock *sk) /* delta_us may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. */ - rto = usecs_to_jiffies(max_t(int, delta_us, 1)); + rto = usecs_to_jiffies(max(delta_us, 1)); } tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, true); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index b94efb3050d2..516ea138993d 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3076,7 +3076,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) jiffies_to_usecs(inet_csk(sk)->icsk_rto) : tcp_rto_delta_us(sk); /* How far in future is RTO? */ if (rto_delta_us > 0) - timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); + timeout = min(timeout, usecs_to_jiffies(rto_delta_us)); tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, true); return true; @@ -4382,8 +4382,7 @@ void tcp_send_delayed_ack(struct sock *sk) * directly. 
*/ if (tp->srtt_us) { - int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), - TCP_DELACK_MIN); + int rtt = max(usecs_to_jiffies(tp->srtt_us >> 3), TCP_DELACK_MIN); if (rtt < max_ato) max_ato = rtt; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 2dd73a4e8e51..9d5fc405e76a 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -43,7 +43,7 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk) if (remaining <= 0) return 1; /* user timeout has passed; fire ASAP */ - return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining)); + return min(icsk->icsk_rto, msecs_to_jiffies(remaining)); } u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when) @@ -504,7 +504,7 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk, */ if (rtx_delta > user_timeout) return true; - timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout)); + timeout = umin(timeout, msecs_to_jiffies(user_timeout)); } /* Note: timer interrupt might have been delayed by at least one jiffy, * and tp->rcv_tstamp might very well have been written recently. 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 40e9c336f6c5..930e34af4331 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1422,11 +1422,11 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block) if_public_preferred_lft = ifp->prefered_lft; memset(&cfg, 0, sizeof(cfg)); - cfg.valid_lft = min_t(__u32, ifp->valid_lft, - READ_ONCE(idev->cnf.temp_valid_lft) + age); + cfg.valid_lft = min(ifp->valid_lft, + READ_ONCE(idev->cnf.temp_valid_lft) + age); cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor; - cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft); - cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft); + cfg.preferred_lft = min(if_public_preferred_lft, cfg.preferred_lft); + cfg.preferred_lft = min(cfg.valid_lft, cfg.preferred_lft); cfg.plen = ifp->prefix_len; tmp_tstamp = ifp->tstamp; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index f427e41e9c49..b3bcbf0d864b 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1731,9 +1731,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) neigh_release(neigh); } - rd_len = min_t(unsigned int, - IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(*msg) - optlen, - skb->len + 8); + rd_len = min(IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(*msg) - optlen, + skb->len + 8); rd_len &= ~0x7; optlen += rd_len; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 173e6edda08f..af0c74f7b4d4 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3015,7 +3015,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); - linear = max(linear, min_t(int, len, dev->hard_header_len)); + linear = max(linear, min(len, dev->hard_header_len)); skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == 
NULL) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 768098dec231..e573fcb21a01 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2448,7 +2448,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, /* allow fallback to order-0 allocations */ size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); - data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); + data_len = max(0, size - (int)SKB_MAX_HEAD(0)); data_len = min_t(size_t, size, PAGE_ALIGN(data_len)); @@ -3054,7 +3054,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state, sunaddr = NULL; } - chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); + chunk = min(unix_skb_len(skb) - skip, size); chunk = state->recv_actor(skb, skip, chunk, state); if (chunk < 0) { if (copied == 0) From 1ce101ed706f6af57d350735fa1dd46d609ae1b0 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:39 +0000 Subject: [PATCH 11/12] net/netlink: Use umin() to avoid min_t(int, ...) discarding high bits The scan limit in genl_allocate_reserve_groups() is: min_t(int, id + n_groups, mc_groups_longs * BITS_PER_LONG); While 'id' and 'n_groups' are both 'int', 'mc_groups_longs' is 'unsigned long' (BITS_PER_LONG is 'int'). These inconsistent types (all the values are small and non-negative) mean that a simple min() fails. When checks for masking high bits are added to min_t() that also fails. Instead use umin() to safely convert all the values to unsigned. Move the limit calculation outside the loop for efficiency and readability. 
Signed-off-by: David Laight --- net/netlink/genetlink.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 978c129c6095..a802dd8ead2d 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -395,10 +395,11 @@ static unsigned int genl_op_iter_idx(struct genl_op_iter *iter) return iter->cmd_idx; } -static int genl_allocate_reserve_groups(int n_groups, int *first_id) +static noinline_for_stack int genl_allocate_reserve_groups(int n_groups, int *first_id) { unsigned long *new_groups; int start = 0; + int limit; int i; int id; bool fits; @@ -414,10 +415,8 @@ static int genl_allocate_reserve_groups(int n_groups, int *first_id) start); fits = true; - for (i = id; - i < min_t(int, id + n_groups, - mc_groups_longs * BITS_PER_LONG); - i++) { + limit = umin(id + n_groups, mc_groups_longs * BITS_PER_LONG); + for (i = id; i < limit; i++) { if (test_bit(i, mc_groups)) { start = i; fits = false; From 3eaf408e9c40139e5a1baadf43631592934b49e2 Mon Sep 17 00:00:00 2001 From: David Laight Date: Wed, 19 Nov 2025 22:41:40 +0000 Subject: [PATCH 12/12] net/mptcp: Change some dubious min_t(int, ...) to min() There are two: min_t(int, xxx, mptcp_wnd_end(msk) - msk->snd_nxt); Both mptcp_wnd_end(msk) and msk->snd_nxt are u64, their difference (aka the window size) might be limited to 32 bits - but that isn't knowable from this code. So checks being added to min_t() detect the potential discard of significant bits. Provided the 'avail_size' and return of mptcp_check_allowed_size() are changed to an unsigned type (size_t matches the type the caller uses) both min_t() can be changed to min(). 
Signed-off-by: David Laight --- net/mptcp/protocol.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 90b4aeca2596..43a775012389 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1125,8 +1125,8 @@ struct mptcp_sendmsg_info { bool data_lock_held; }; -static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, - u64 data_seq, int avail_size) +static size_t mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, + u64 data_seq, size_t avail_size) { u64 window_end = mptcp_wnd_end(msk); u64 mptcp_snd_wnd; @@ -1135,7 +1135,7 @@ static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *s return avail_size; mptcp_snd_wnd = window_end - data_seq; - avail_size = min_t(unsigned int, mptcp_snd_wnd, avail_size); + avail_size = min(mptcp_snd_wnd, avail_size); if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) { tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd); @@ -1479,7 +1479,7 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) if (!ssk || !sk_stream_memory_free(ssk)) return NULL; - burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); + burst = min(MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); wmem = READ_ONCE(ssk->sk_wmem_queued); if (!burst) return ssk;