diff --git a/include/zephyr/net/net_if.h b/include/zephyr/net/net_if.h
index 35f5e8577ffd6..cfba714d6631a 100644
--- a/include/zephyr/net/net_if.h
+++ b/include/zephyr/net/net_if.h
@@ -94,6 +94,11 @@ struct net_if_addr {
 		struct {
 			/** Duplicate address detection (DAD) timer */
 			sys_snode_t dad_node;
+
+			/** DAD needed list node */
+			sys_snode_t dad_need_node;
+
+			/** DAD start time */
 			uint32_t dad_start;
 
 			/** How many times we have done DAD */
@@ -104,6 +109,11 @@ struct net_if_addr {
 		struct {
 			/** Address conflict detection (ACD) timer. */
 			sys_snode_t acd_node;
+
+			/** ACD needed list node */
+			sys_snode_t acd_need_node;
+
+			/** ACD timeout value. */
 			k_timepoint_t acd_timeout;
 
 			/** ACD probe/announcement counter. */
diff --git a/subsys/net/ip/net_if.c b/subsys/net/ip/net_if.c
index 3c1f4aa5382ce..597ca39bb4aec 100644
--- a/subsys/net/ip/net_if.c
+++ b/subsys/net/ip/net_if.c
@@ -1324,8 +1324,9 @@ void net_if_ipv6_start_dad(struct net_if *iface,
 
 void net_if_start_dad(struct net_if *iface)
 {
-	struct net_if_addr *ifaddr;
+	struct net_if_addr *ifaddr, *next;
 	struct net_if_ipv6 *ipv6;
+	sys_slist_t dad_needed;
 	struct in6_addr addr = { };
 	int ret;
 
@@ -1357,6 +1358,8 @@ void net_if_start_dad(struct net_if *iface)
 	/* Start DAD for all the addresses that were added earlier when
 	 * the interface was down.
 	 */
+	sys_slist_init(&dad_needed);
+
 	ARRAY_FOR_EACH(ipv6->unicast, i) {
 		if (!ipv6->unicast[i].is_used ||
 		    ipv6->unicast[i].address.family != AF_INET6 ||
@@ -1366,9 +1369,21 @@ void net_if_start_dad(struct net_if *iface)
 			continue;
 		}
 
-		net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
+		sys_slist_prepend(&dad_needed, &ipv6->unicast[i].dad_need_node);
 	}
 
+	net_if_unlock(iface);
+
+	/* Start DAD for all the addresses without holding the iface lock
+	 * to avoid any possible mutex deadlock issues.
+	 */
+	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&dad_needed,
+					  ifaddr, next, dad_need_node) {
+		net_if_ipv6_start_dad(iface, ifaddr);
+	}
+
+	return;
+
 out:
 	net_if_unlock(iface);
 }
@@ -1415,7 +1430,10 @@ void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
 	net_if_ipv6_addr_rm(iface, addr);
 
 	if (IS_ENABLED(CONFIG_NET_IPV6_PE) && iface->pe_enabled) {
+		net_if_unlock(iface);
+
 		net_ipv6_pe_start(iface, addr, timeout, preferred_lifetime);
+		return;
 	}
 
 out:
@@ -1519,6 +1537,8 @@ void net_if_start_rs(struct net_if *iface)
 		goto out;
 	}
 
+	net_if_unlock(iface);
+
 	NET_DBG("Starting ND/RS for iface %p", iface);
 
 	if (!net_ipv6_start_rs(iface)) {
@@ -1534,6 +1554,7 @@ void net_if_start_rs(struct net_if *iface)
 		}
 	}
 
+	return;
 out:
 	net_if_unlock(iface);
 }
@@ -1941,6 +1962,7 @@ struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
 {
 	struct net_if_addr *ifaddr = NULL;
 	struct net_if_ipv6 *ipv6;
+	bool do_dad = false;
 
 	net_if_lock(iface);
 
@@ -1992,8 +2014,7 @@ struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
 			 */
 			join_mcast_nodes(iface,
 					 &ipv6->unicast[i].address.in6_addr);
-
-			net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
+			do_dad = true;
 		} else {
 			/* If DAD is not done for point-to-point links, then
 			 * the address is usable immediately.
@@ -2007,9 +2028,17 @@ struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
 			sizeof(struct in6_addr));
 
 		ifaddr = &ipv6->unicast[i];
-		goto out;
+		break;
 	}
 
+	net_if_unlock(iface);
+
+	if (ifaddr != NULL && do_dad) {
+		net_if_ipv6_start_dad(iface, ifaddr);
+	}
+
+	return ifaddr;
+
 out:
 	net_if_unlock(iface);
 
@@ -4179,7 +4208,9 @@ void net_if_ipv4_start_acd(struct net_if *iface, struct net_if_addr *ifaddr)
 
 void net_if_start_acd(struct net_if *iface)
 {
+	struct net_if_addr *ifaddr, *next;
 	struct net_if_ipv4 *ipv4;
+	sys_slist_t acd_needed;
 	int ret;
 
 	net_if_lock(iface);
@@ -4201,6 +4232,11 @@ void net_if_start_acd(struct net_if *iface)
 
 	ipv4->conflict_cnt = 0;
 
+	/* Start ACD for all the addresses that were added earlier when
+	 * the interface was down.
+	 */
+	sys_slist_init(&acd_needed);
+
 	/* Start ACD for all the addresses that were added earlier when
 	 * the interface was down.
 	 */
@@ -4212,9 +4248,21 @@ void net_if_start_acd(struct net_if *iface)
 			continue;
 		}
 
-		net_if_ipv4_start_acd(iface, &ipv4->unicast[i].ipv4);
+		sys_slist_prepend(&acd_needed, &ipv4->unicast[i].ipv4.acd_need_node);
 	}
 
+	net_if_unlock(iface);
+
+	/* Start ACD for all the addresses without holding the iface lock
+	 * to avoid any possible mutex deadlock issues.
+	 */
+	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&acd_needed,
+					  ifaddr, next, acd_need_node) {
+		net_if_ipv4_start_acd(iface, ifaddr);
+	}
+
+	return;
+
 out:
 	net_if_unlock(iface);
 }
@@ -4306,7 +4354,8 @@ struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
 
 		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
 		    !net_ipv4_is_addr_loopback(addr)) {
-			net_if_ipv4_start_acd(iface, ifaddr);
+			/* ACD is started after the lock is released. */
+			;
 		} else {
 			ifaddr->addr_state = NET_ADDR_PREFERRED;
 		}
@@ -4314,7 +4363,12 @@ struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
 		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
 						&ifaddr->address.in_addr,
 						sizeof(struct in_addr));
-		goto out;
+
+		net_if_unlock(iface);
+
+		net_if_ipv4_start_acd(iface, ifaddr);
+
+		return ifaddr;
 	}
 
 out:
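
Note (not part of the patch): the reworked net_if_start_dad() and net_if_start_acd() follow a collect-then-process pattern. While the interface lock is held they only chain the addresses that need duplicate/conflict detection onto a stack-local sys_slist_t via the new dad_need_node/acd_need_node members; the start functions then run after net_if_unlock(). A dedicated list node is needed because a sys_snode_t can only sit on one list at a time, and dad_node/acd_node are already reserved for the DAD/ACD timer lists. The standalone sketch below illustrates the pattern; pending_entry, table_lock, run_detection() and start_pending_detection() are made-up names for illustration, not Zephyr or patch APIs.

#include <zephyr/kernel.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>

struct pending_entry {
	bool needs_check;        /* set/cleared while the table lock is held */
	sys_snode_t need_node;   /* node reserved for the deferred-work list */
};

static struct pending_entry table[4];
K_MUTEX_DEFINE(table_lock);

/* Stands in for net_if_ipv6_start_dad()/net_if_ipv4_start_acd(): work that
 * may take other locks, so it must not run while table_lock is held.
 */
static void run_detection(struct pending_entry *entry)
{
	ARG_UNUSED(entry);
}

void start_pending_detection(void)
{
	struct pending_entry *entry, *next;
	sys_slist_t needed;

	sys_slist_init(&needed);

	k_mutex_lock(&table_lock, K_FOREVER);

	/* Phase 1: under the lock, only record which entries need work. */
	ARRAY_FOR_EACH(table, i) {
		if (table[i].needs_check) {
			sys_slist_prepend(&needed, &table[i].need_node);
		}
	}

	k_mutex_unlock(&table_lock);

	/* Phase 2: run the heavy work with the lock released, mirroring the
	 * SYS_SLIST_FOR_EACH_CONTAINER_SAFE() loops added by the patch.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&needed, entry, next, need_node) {
		run_detection(entry);
	}
}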
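
The single-address paths apply the same idea without a list: net_if_ipv6_addr_add() now only records the decision in the new do_dad flag (net_if_ipv4_addr_add() leaves an empty branch with a comment), releases the lock with net_if_unlock(), and calls net_if_ipv6_start_dad()/net_if_ipv4_start_acd() just before returning ifaddr. Likewise, net_if_ipv6_dad_failed() and net_if_start_rs() unlock before calling net_ipv6_pe_start() and net_ipv6_start_rs() and return early, so the shared out: label does not unlock the interface a second time.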