diff --git a/include/zephyr/net/mld.h b/include/zephyr/net/mld.h new file mode 100644 index 000000000000..031eb03c931e --- /dev/null +++ b/include/zephyr/net/mld.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2016 Intel Corporation + * Copyright (c) 2024 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** @file + * @brief Multicast Listener Discovery API + */ + +#ifndef ZEPHYR_INCLUDE_NET_MLD_H_ +#define ZEPHYR_INCLUDE_NET_MLD_H_ + +/** + * @brief MLD (Multicast Listener Discovery) + * @defgroup mld Multicast Listener Discovery API + * @since 1.8 + * @version 0.8.0 + * @ingroup networking + * @{ + */ + +#include + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Join a given multicast group. + * + * @param iface Network interface where join message is sent + * @param addr Multicast group to join + * + * @return 0 if joining was done, <0 otherwise. + */ +#if defined(CONFIG_NET_IPV6_MLD) +int net_ipv6_mld_join(struct net_if *iface, const struct in6_addr *addr); +#else +static inline int +net_ipv6_mld_join(struct net_if *iface, const struct in6_addr *addr) +{ + ARG_UNUSED(addr); + ARG_UNUSED(iface); + + return -ENOTSUP; +} +#endif /* CONFIG_NET_IPV6_MLD */ + +/** + * @brief Leave a given multicast group. + * + * @param iface Network interface where leave message is sent + * @param addr Multicast group to leave + * + * @return 0 if leaving is done, <0 otherwise. 
+ */ +#if defined(CONFIG_NET_IPV6_MLD) +int net_ipv6_mld_leave(struct net_if *iface, const struct in6_addr *addr); +#else +static inline int +net_ipv6_mld_leave(struct net_if *iface, const struct in6_addr *addr) +{ + ARG_UNUSED(iface); + ARG_UNUSED(addr); + + return -ENOTSUP; +} +#endif /* CONFIG_NET_IPV6_MLD */ + +#ifdef __cplusplus +} +#endif + +/** + * @} + */ + +#endif /* ZEPHYR_INCLUDE_NET_MLD_H_ */ diff --git a/include/zephyr/net/net_if.h b/include/zephyr/net/net_if.h index c94d6644314b..7795f5659766 100644 --- a/include/zephyr/net/net_if.h +++ b/include/zephyr/net/net_if.h @@ -300,7 +300,7 @@ struct net_offload; #endif /* CONFIG_NET_OFFLOAD */ /** @cond INTERNAL_HIDDEN */ -#if defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IPV6) #define NET_IF_MAX_IPV6_ADDR CONFIG_NET_IF_UNICAST_IPV6_ADDR_COUNT #define NET_IF_MAX_IPV6_MADDR CONFIG_NET_IF_MCAST_IPV6_ADDR_COUNT #define NET_IF_MAX_IPV6_PREFIX CONFIG_NET_IF_IPV6_PREFIX_COUNT @@ -422,7 +422,7 @@ struct net_if_dhcpv6 { #endif /* defined(CONFIG_NET_DHCPV6) && defined(CONFIG_NET_NATIVE_IPV6) */ /** @cond INTERNAL_HIDDEN */ -#if defined(CONFIG_NET_NATIVE_IPV4) +#if defined(CONFIG_NET_IPV4) #define NET_IF_MAX_IPV4_ADDR CONFIG_NET_IF_UNICAST_IPV4_ADDR_COUNT #define NET_IF_MAX_IPV4_MADDR CONFIG_NET_IF_MCAST_IPV4_ADDR_COUNT #else @@ -542,11 +542,11 @@ struct net_if_ipv4_autoconf { * @brief Network interface IP address configuration. 
*/ struct net_if_ip { -#if defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IPV6) struct net_if_ipv6 *ipv6; #endif /* CONFIG_NET_IPV6 */ -#if defined(CONFIG_NET_NATIVE_IPV4) +#if defined(CONFIG_NET_IPV4) struct net_if_ipv4 *ipv4; #endif /* CONFIG_NET_IPV4 */ }; @@ -1827,7 +1827,16 @@ bool net_if_ipv6_router_rm(struct net_if_router *router); * * @return Hop limit */ +#if defined(CONFIG_NET_NATIVE_IPV6) uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface); +#else +static inline uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface) +{ + ARG_UNUSED(iface); + + return 0; +} +#endif /* CONFIG_NET_NATIVE_IPV6 */ /** * @brief Set the default IPv6 hop limit of a given interface. @@ -1835,7 +1844,16 @@ uint8_t net_if_ipv6_get_hop_limit(struct net_if *iface); * @param iface Network interface * @param hop_limit New hop limit */ +#if defined(CONFIG_NET_NATIVE_IPV6) void net_if_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit); +#else +static inline void net_if_ipv6_set_hop_limit(struct net_if *iface, + uint8_t hop_limit) +{ + ARG_UNUSED(iface); + ARG_UNUSED(hop_limit); +} +#endif /* CONFIG_NET_NATIVE_IPV6 */ /** @cond INTERNAL_HIDDEN */ @@ -1860,7 +1878,16 @@ static inline void net_ipv6_set_hop_limit(struct net_if *iface, * * @return Hop limit */ +#if defined(CONFIG_NET_NATIVE_IPV6) uint8_t net_if_ipv6_get_mcast_hop_limit(struct net_if *iface); +#else +static inline uint8_t net_if_ipv6_get_mcast_hop_limit(struct net_if *iface) +{ + ARG_UNUSED(iface); + + return 0; +} +#endif /* CONFIG_NET_NATIVE_IPV6 */ /** * @brief Set the default IPv6 multicast hop limit of a given interface. 
@@ -1868,7 +1895,16 @@ uint8_t net_if_ipv6_get_mcast_hop_limit(struct net_if *iface); * @param iface Network interface * @param hop_limit New hop limit */ +#if defined(CONFIG_NET_NATIVE_IPV6) void net_if_ipv6_set_mcast_hop_limit(struct net_if *iface, uint8_t hop_limit); +#else +static inline void net_if_ipv6_set_mcast_hop_limit(struct net_if *iface, + uint8_t hop_limit) +{ + ARG_UNUSED(iface); + ARG_UNUSED(hop_limit); +} +#endif /* CONFIG_NET_NATIVE_IPV6 */ /** * @brief Set IPv6 reachable time for a given interface @@ -2002,7 +2038,7 @@ static inline uint32_t net_if_ipv6_get_retrans_timer(struct net_if *iface) * @return Pointer to IPv6 address to use, NULL if no IPv6 address * could be found. */ -#if defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IPV6) const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *iface, const struct in6_addr *dst); #else @@ -2029,7 +2065,7 @@ static inline const struct in6_addr *net_if_ipv6_select_src_addr( * @return Pointer to IPv6 address to use, NULL if no IPv6 address * could be found. */ -#if defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IPV6) const struct in6_addr *net_if_ipv6_select_src_addr_hint(struct net_if *iface, const struct in6_addr *dst, int flags); @@ -2054,7 +2090,7 @@ static inline const struct in6_addr *net_if_ipv6_select_src_addr_hint( * @return Pointer to network interface to use, NULL if no suitable interface * could be found. */ -#if defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IPV6) struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst); #else static inline struct net_if *net_if_ipv6_select_src_iface( @@ -2431,7 +2467,7 @@ bool net_if_ipv4_is_addr_bcast(struct net_if *iface, * @return Pointer to network interface to use, NULL if no suitable interface * could be found. 
*/ -#if defined(CONFIG_NET_NATIVE_IPV4) +#if defined(CONFIG_NET_IPV4) struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst); #else static inline struct net_if *net_if_ipv4_select_src_iface( @@ -2454,7 +2490,7 @@ static inline struct net_if *net_if_ipv4_select_src_iface( * @return Pointer to IPv4 address to use, NULL if no IPv4 address * could be found. */ -#if defined(CONFIG_NET_NATIVE_IPV4) +#if defined(CONFIG_NET_IPV4) const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *iface, const struct in_addr *dst); #else diff --git a/samples/net/sockets/coap_server/src/main.c b/samples/net/sockets/coap_server/src/main.c index 4f476511b2cf..60dd32a000ba 100644 --- a/samples/net/sockets/coap_server/src/main.c +++ b/samples/net/sockets/coap_server/src/main.c @@ -8,6 +8,7 @@ LOG_MODULE_REGISTER(net_coap_service_sample, LOG_LEVEL_DBG); #include +#include #ifdef CONFIG_NET_IPV6 #include "net_private.h" diff --git a/subsys/net/ip/CMakeLists.txt b/subsys/net/ip/CMakeLists.txt index 11509d804d78..0189d748f6a7 100644 --- a/subsys/net/ip/CMakeLists.txt +++ b/subsys/net/ip/CMakeLists.txt @@ -19,7 +19,7 @@ zephyr_library_sources( ) if(CONFIG_NET_OFFLOAD) -zephyr_library_sources(net_context.c net_pkt.c net_tc.c) +zephyr_library_sources(net_context.c net_pkt.c) endif() zephyr_library_sources_ifdef(CONFIG_NET_MGMT_EVENT net_mgmt.c) diff --git a/subsys/net/ip/Kconfig b/subsys/net/ip/Kconfig index 8e525d5ecc59..cb08b3846ca4 100644 --- a/subsys/net/ip/Kconfig +++ b/subsys/net/ip/Kconfig @@ -168,7 +168,7 @@ config NET_IPV4_MAPPING_TO_IPV6 config NET_SHELL bool "Network shell utilities" select SHELL - select NET_IPV4_IGMP if NET_IPV4 + select NET_IPV4_IGMP if NET_NATIVE_IPV4 select REQUIRES_FLOAT_PRINTF help Activate shell module that provides network commands like diff --git a/subsys/net/ip/Kconfig.ipv4 b/subsys/net/ip/Kconfig.ipv4 index a108a015fbed..77c29c8966ce 100644 --- a/subsys/net/ip/Kconfig.ipv4 +++ b/subsys/net/ip/Kconfig.ipv4 @@ -11,6 +11,28 @@ 
menuconfig NET_IPV4 if NET_IPV4 +config NET_IF_MAX_IPV4_COUNT + int "Max number of IPv4 network interfaces in the system" + default NET_VLAN_COUNT if NET_VLAN + default 2 if NET_LOOPBACK + default 1 + help + This tells how many network interfaces there will be in the system + that will have IPv4 enabled. + +config NET_IF_UNICAST_IPV4_ADDR_COUNT + int "Max number of unicast IPv4 addresses per network interface" + default 2 if NET_IPV4_AUTO + default 2 if NET_LOOPBACK + default 1 + +config NET_IF_MCAST_IPV4_ADDR_COUNT + int "Max number of multicast IPv4 addresses per network interface" + default 2 if NET_IPV4_IGMP + default 1 + +if NET_NATIVE_IPV4 + config NET_INITIAL_TTL int "Initial IPv4 time to live value for unicast packets" default 64 @@ -34,26 +56,6 @@ config NET_INITIAL_MCAST_TTL don't leave the local network unless the application explicitly requests it. -config NET_IF_MAX_IPV4_COUNT - int "Max number of IPv4 network interfaces in the system" - default NET_VLAN_COUNT if NET_VLAN - default 2 if NET_LOOPBACK - default 1 - help - This tells how many network interfaces there will be in the system - that will have IPv4 enabled. - -config NET_IF_UNICAST_IPV4_ADDR_COUNT - int "Max number of unicast IPv4 addresses per network interface" - default 2 if NET_IPV4_AUTO - default 2 if NET_LOOPBACK - default 1 - -config NET_IF_MCAST_IPV4_ADDR_COUNT - int "Max number of multicast IPv4 addresses per network interface" - default 2 if NET_IPV4_IGMP - default 1 - config NET_IF_MCAST_IPV4_SOURCE_COUNT int "Max number of IPv4 sources per mcast address to be included or excluded" default 1 @@ -177,4 +179,5 @@ module-help = Enable debug diagnostic from IPv4 autoconf client. 
source "subsys/net/Kconfig.template.log_config.net" endif # NET_IPV4_AUTO +endif # NET_NATIVE_IPV4 endif # NET_IPV4 diff --git a/subsys/net/ip/ipv6.h b/subsys/net/ip/ipv6.h index 2dbd1189a6ae..68115f818c26 100644 --- a/subsys/net/ip/ipv6.h +++ b/subsys/net/ip/ipv6.h @@ -198,48 +198,6 @@ static inline int net_ipv6_finalize(struct net_pkt *pkt, } #endif -/** - * @brief Join a given multicast group. - * - * @param iface Network interface where join message is sent - * @param addr Multicast group to join - * - * @return Return 0 if joining was done, <0 otherwise. - */ -#if defined(CONFIG_NET_IPV6_MLD) -int net_ipv6_mld_join(struct net_if *iface, const struct in6_addr *addr); -#else -static inline int -net_ipv6_mld_join(struct net_if *iface, const struct in6_addr *addr) -{ - ARG_UNUSED(iface); - ARG_UNUSED(addr); - - return -ENOTSUP; -} -#endif /* CONFIG_NET_IPV6_MLD */ - -/** - * @brief Leave a given multicast group. - * - * @param iface Network interface where leave message is sent - * @param addr Multicast group to leave - * - * @return Return 0 if leaving is done, <0 otherwise. - */ -#if defined(CONFIG_NET_IPV6_MLD) -int net_ipv6_mld_leave(struct net_if *iface, const struct in6_addr *addr); -#else -static inline int -net_ipv6_mld_leave(struct net_if *iface, const struct in6_addr *addr) -{ - ARG_UNUSED(iface); - ARG_UNUSED(addr); - - return -ENOTSUP; -} -#endif /* CONFIG_NET_IPV6_MLD */ - /** * @brief Send MLDv2 report message with a single entry. 
* diff --git a/subsys/net/ip/ipv6_mld.c b/subsys/net/ip/ipv6_mld.c index 75644c2ca580..66166fd1d3be 100644 --- a/subsys/net/ip/ipv6_mld.c +++ b/subsys/net/ip/ipv6_mld.c @@ -12,6 +12,7 @@ LOG_MODULE_DECLARE(net_ipv6, CONFIG_NET_IPV6_LOG_LEVEL); #include +#include #include #include #include diff --git a/subsys/net/ip/net_context.c b/subsys/net/ip/net_context.c index af434e950f17..e6a13874bdef 100644 --- a/subsys/net/ip/net_context.c +++ b/subsys/net/ip/net_context.c @@ -2456,7 +2456,7 @@ enum net_verdict net_context_packet_received(struct net_conn *conn, return verdict; } -#if defined(CONFIG_NET_UDP) +#if defined(CONFIG_NET_NATIVE_UDP) static int recv_udp(struct net_context *context, net_context_recv_cb_t cb, k_timeout_t timeout, @@ -2538,7 +2538,7 @@ static int recv_udp(struct net_context *context, } #else #define recv_udp(...) 0 -#endif /* CONFIG_NET_UDP */ +#endif /* CONFIG_NET_NATIVE_UDP */ static enum net_verdict net_context_raw_packet_received( struct net_conn *conn, diff --git a/subsys/net/ip/net_core.c b/subsys/net/ip/net_core.c index 8ab2fb93a3c8..5259f9c6e6d7 100644 --- a/subsys/net/ip/net_core.c +++ b/subsys/net/ip/net_core.c @@ -60,6 +60,7 @@ LOG_MODULE_REGISTER(net_core, CONFIG_NET_CORE_LOG_LEVEL); #include "net_stats.h" +#if defined(CONFIG_NET_NATIVE) static inline enum net_verdict process_data(struct net_pkt *pkt, bool is_loopback) { @@ -188,22 +189,6 @@ static void net_post_init(void) #endif } -static void init_rx_queues(void) -{ - /* Starting TX side. The ordering is important here and the TX - * can only be started when RX side is ready to receive packets. - */ - net_if_init(); - - net_tc_rx_init(); - - /* This will take the interface up and start everything. 
*/ - net_if_post_init(); - - /* Things to init after network interface is working */ - net_post_init(); -} - static inline void copy_ll_addr(struct net_pkt *pkt) { memcpy(net_pkt_lladdr_src(pkt), net_pkt_lladdr_if(pkt), @@ -571,6 +556,39 @@ static inline void l3_init(void) NET_DBG("Network L3 init done"); } +#else /* CONFIG_NET_NATIVE */ +#define l3_init(...) +#define net_post_init(...) +int net_send_data(struct net_pkt *pkt) +{ + ARG_UNUSED(pkt); + + return -ENOTSUP; +} +int net_recv_data(struct net_if *iface, struct net_pkt *pkt) +{ + ARG_UNUSED(iface); + ARG_UNUSED(pkt); + + return -ENOTSUP; +} +#endif /* CONFIG_NET_NATIVE */ + +static void init_rx_queues(void) +{ + /* Starting TX side. The ordering is important here and the TX + * can only be started when RX side is ready to receive packets. + */ + net_if_init(); + + net_tc_rx_init(); + + /* This will take the interface up and start everything. */ + net_if_post_init(); + + /* Things to init after network interface is working */ + net_post_init(); +} static inline int services_init(void) { diff --git a/subsys/net/ip/net_if.c b/subsys/net/ip/net_if.c index 54c4f24f3680..8bceee7a29d9 100644 --- a/subsys/net/ip/net_if.c +++ b/subsys/net/ip/net_if.c @@ -17,6 +17,7 @@ LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL); #include #include #include +#include #include #include #include @@ -84,14 +85,16 @@ static sys_slist_t active_dad_timers; static struct k_work_delayable rs_timer; static sys_slist_t active_rs_timers; #endif +#endif /* CONFIG_NET_NATIVE_IPV6 */ +#if defined(CONFIG_NET_IPV6) static struct { struct net_if_ipv6 ipv6; struct net_if *iface; } ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT]; #endif /* CONFIG_NET_NATIVE_IPV6 */ -#if defined(CONFIG_NET_NATIVE_IPV4) +#if defined(CONFIG_NET_IPV4) static struct { struct net_if_ipv4 ipv4; struct net_if *iface; @@ -172,6 +175,7 @@ struct net_if *z_vrfy_net_if_get_by_index(int index) #include #endif +#if defined(CONFIG_NET_NATIVE) static inline void 
net_context_send_cb(struct net_context *context, int status) { @@ -380,6 +384,7 @@ void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt) ; } } +#endif /* CONFIG_NET_NATIVE */ void net_if_stats_reset(struct net_if *iface) { @@ -443,6 +448,7 @@ static inline void init_iface(struct net_if *iface) net_ipv6_pe_init(iface); } +#if defined(CONFIG_NET_NATIVE) enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt) { const struct net_l2 *l2; @@ -549,6 +555,7 @@ enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt) return verdict; } +#endif /* CONFIG_NET_NATIVE */ int net_if_set_link_addr_locked(struct net_if *iface, uint8_t *addr, uint8_t len, @@ -674,7 +681,7 @@ static enum net_l2_flags l2_flags_get(struct net_if *iface) return flags; } -#if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IP) /* Return how many bits are shared between two IP addresses */ static uint8_t get_ipaddr_diff(const uint8_t *src, const uint8_t *dst, int addr_len) { @@ -700,7 +707,9 @@ static uint8_t get_ipaddr_diff(const uint8_t *src, const uint8_t *dst, int addr_ return len; } +#endif /* CONFIG_NET_IP */ +#if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6) static struct net_if_router *iface_router_lookup(struct net_if *iface, uint8_t family, void *addr) { @@ -1030,7 +1039,7 @@ void net_if_mcast_monitor(struct net_if *iface, #define net_if_mcast_monitor(...) 
#endif /* CONFIG_NET_NATIVE_IPV4 || CONFIG_NET_NATIVE_IPV6 */ -#if defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IPV6) int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6) { int ret = 0; @@ -1118,6 +1127,7 @@ int net_if_config_ipv6_put(struct net_if *iface) return ret; } +#if defined(CONFIG_NET_NATIVE_IPV6) #if defined(CONFIG_NET_IPV6_MLD) static void join_mcast_allnodes(struct net_if *iface) { @@ -1579,110 +1589,6 @@ void net_if_nbr_reachability_hint(struct net_if *iface, const struct in6_addr *i #endif -struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr, - struct net_if **ret) -{ - struct net_if_addr *ifaddr = NULL; - - STRUCT_SECTION_FOREACH(net_if, iface) { - struct net_if_ipv6 *ipv6; - - net_if_lock(iface); - - ipv6 = iface->config.ip.ipv6; - if (!ipv6) { - net_if_unlock(iface); - continue; - } - - ARRAY_FOR_EACH(ipv6->unicast, i) { - if (!ipv6->unicast[i].is_used || - ipv6->unicast[i].address.family != AF_INET6) { - continue; - } - - if (net_ipv6_is_prefix( - addr->s6_addr, - ipv6->unicast[i].address.in6_addr.s6_addr, - 128)) { - - if (ret) { - *ret = iface; - } - - ifaddr = &ipv6->unicast[i]; - net_if_unlock(iface); - goto out; - } - } - - net_if_unlock(iface); - } - -out: - return ifaddr; -} - -struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface, - struct in6_addr *addr) -{ - struct net_if_addr *ifaddr = NULL; - struct net_if_ipv6 *ipv6; - - net_if_lock(iface); - - ipv6 = iface->config.ip.ipv6; - if (!ipv6) { - goto out; - } - - ARRAY_FOR_EACH(ipv6->unicast, i) { - if (!ipv6->unicast[i].is_used || - ipv6->unicast[i].address.family != AF_INET6) { - continue; - } - - if (net_ipv6_is_prefix( - addr->s6_addr, - ipv6->unicast[i].address.in6_addr.s6_addr, - 128)) { - ifaddr = &ipv6->unicast[i]; - goto out; - } - } - -out: - net_if_unlock(iface); - - return ifaddr; -} - -int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr) -{ - struct net_if *iface = NULL; - struct 
net_if_addr *if_addr; - - if_addr = net_if_ipv6_addr_lookup(addr, &iface); - if (!if_addr) { - return 0; - } - - return net_if_get_by_iface(iface); -} - -#ifdef CONFIG_USERSPACE -static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index( - const struct in6_addr *addr) -{ - struct in6_addr addr_v6; - - K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6))); - - return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6); -} -#include -#endif - /* To be called when interface comes up so that all the non-joined multicast * groups are joined. */ @@ -1845,6 +1751,119 @@ static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime) net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32()); k_work_reschedule(&address_lifetime_timer, K_NO_WAIT); } +#else /* CONFIG_NET_NATIVE_IPV6 */ +#define address_start_timer(...) +static inline void net_if_ipv6_start_dad(struct net_if *iface, + struct net_if_addr *ifaddr) +{ + ifaddr->addr_state = NET_ADDR_PREFERRED; +} +#define join_mcast_nodes(...) 
+#endif /* CONFIG_NET_NATIVE_IPV6 */ + +struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr, + struct net_if **ret) +{ + struct net_if_addr *ifaddr = NULL; + + STRUCT_SECTION_FOREACH(net_if, iface) { + struct net_if_ipv6 *ipv6; + + net_if_lock(iface); + + ipv6 = iface->config.ip.ipv6; + if (!ipv6) { + net_if_unlock(iface); + continue; + } + + ARRAY_FOR_EACH(ipv6->unicast, i) { + if (!ipv6->unicast[i].is_used || + ipv6->unicast[i].address.family != AF_INET6) { + continue; + } + + if (net_ipv6_is_prefix( + addr->s6_addr, + ipv6->unicast[i].address.in6_addr.s6_addr, + 128)) { + + if (ret) { + *ret = iface; + } + + ifaddr = &ipv6->unicast[i]; + net_if_unlock(iface); + goto out; + } + } + + net_if_unlock(iface); + } + +out: + return ifaddr; +} + +struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface, + struct in6_addr *addr) +{ + struct net_if_addr *ifaddr = NULL; + struct net_if_ipv6 *ipv6; + + net_if_lock(iface); + + ipv6 = iface->config.ip.ipv6; + if (!ipv6) { + goto out; + } + + ARRAY_FOR_EACH(ipv6->unicast, i) { + if (!ipv6->unicast[i].is_used || + ipv6->unicast[i].address.family != AF_INET6) { + continue; + } + + if (net_ipv6_is_prefix( + addr->s6_addr, + ipv6->unicast[i].address.in6_addr.s6_addr, + 128)) { + ifaddr = &ipv6->unicast[i]; + goto out; + } + } + +out: + net_if_unlock(iface); + + return ifaddr; +} + +int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr) +{ + struct net_if *iface = NULL; + struct net_if_addr *if_addr; + + if_addr = net_if_ipv6_addr_lookup(addr, &iface); + if (!if_addr) { + return 0; + } + + return net_if_get_by_iface(iface); +} + +#ifdef CONFIG_USERSPACE +static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index( + const struct in6_addr *addr) +{ + struct in6_addr addr_v6; + + K_OOPS(k_usermode_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6))); + + return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6); +} +#include +#endif void net_if_ipv6_addr_update_lifetime(struct 
net_if_addr *ifaddr, uint32_t vlifetime) @@ -2307,31 +2326,143 @@ void net_if_ipv6_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr net_if_unlock(iface); } -static void remove_prefix_addresses(struct net_if *iface, - struct net_if_ipv6 *ipv6, - struct in6_addr *addr, - uint8_t len) +struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface, + enum net_addr_state addr_state) { + struct in6_addr *addr = NULL; + struct net_if_ipv6 *ipv6; + + net_if_lock(iface); + + ipv6 = iface->config.ip.ipv6; + if (!ipv6) { + goto out; + } + ARRAY_FOR_EACH(ipv6->unicast, i) { if (!ipv6->unicast[i].is_used || - ipv6->unicast[i].address.family != AF_INET6 || - ipv6->unicast[i].addr_type != NET_ADDR_AUTOCONF) { + (addr_state != NET_ADDR_ANY_STATE && + ipv6->unicast[i].addr_state != addr_state) || + ipv6->unicast[i].address.family != AF_INET6) { continue; } - if (net_ipv6_is_prefix( - addr->s6_addr, - ipv6->unicast[i].address.in6_addr.s6_addr, - len)) { - net_if_ipv6_addr_rm(iface, - &ipv6->unicast[i].address.in6_addr); + if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) { + addr = &ipv6->unicast[i].address.in6_addr; + goto out; } } -} -static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix) -{ - struct net_if_ipv6 *ipv6; +out: + net_if_unlock(iface); + + return addr; +} + +struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state, + struct net_if **iface) +{ + struct in6_addr *addr = NULL; + + STRUCT_SECTION_FOREACH(net_if, tmp) { + net_if_lock(tmp); + + addr = net_if_ipv6_get_ll(tmp, state); + if (addr) { + if (iface) { + *iface = tmp; + } + + net_if_unlock(tmp); + goto out; + } + + net_if_unlock(tmp); + } + +out: + return addr; +} + +static inline struct in6_addr *check_global_addr(struct net_if *iface, + enum net_addr_state state) +{ + struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6; + + if (!ipv6) { + return NULL; + } + + ARRAY_FOR_EACH(ipv6->unicast, i) { + if (!ipv6->unicast[i].is_used || + (ipv6->unicast[i].addr_state != state) 
|| + ipv6->unicast[i].address.family != AF_INET6) { + continue; + } + + if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) { + return &ipv6->unicast[i].address.in6_addr; + } + } + + return NULL; +} + +struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state, + struct net_if **iface) +{ + struct in6_addr *addr = NULL; + + STRUCT_SECTION_FOREACH(net_if, tmp) { + if (iface && *iface && tmp != *iface) { + continue; + } + + net_if_lock(tmp); + addr = check_global_addr(tmp, state); + if (addr) { + if (iface) { + *iface = tmp; + } + + net_if_unlock(tmp); + goto out; + } + + net_if_unlock(tmp); + } + +out: + + return addr; +} + +#if defined(CONFIG_NET_NATIVE_IPV6) +static void remove_prefix_addresses(struct net_if *iface, + struct net_if_ipv6 *ipv6, + struct in6_addr *addr, + uint8_t len) +{ + ARRAY_FOR_EACH(ipv6->unicast, i) { + if (!ipv6->unicast[i].is_used || + ipv6->unicast[i].address.family != AF_INET6 || + ipv6->unicast[i].addr_type != NET_ADDR_AUTOCONF) { + continue; + } + + if (net_ipv6_is_prefix( + addr->s6_addr, + ipv6->unicast[i].address.in6_addr.s6_addr, + len)) { + net_if_ipv6_addr_rm(iface, + &ipv6->unicast[i].address.in6_addr); + } + } +} + +static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix) +{ + struct net_if_ipv6 *ipv6; net_if_lock(ifprefix->iface); @@ -2852,116 +2983,7 @@ void net_if_ipv6_set_hop_limit(struct net_if *iface, uint8_t hop_limit) net_if_unlock(iface); } -struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface, - enum net_addr_state addr_state) -{ - struct in6_addr *addr = NULL; - struct net_if_ipv6 *ipv6; - - net_if_lock(iface); - - ipv6 = iface->config.ip.ipv6; - if (!ipv6) { - goto out; - } - - ARRAY_FOR_EACH(ipv6->unicast, i) { - if (!ipv6->unicast[i].is_used || - (addr_state != NET_ADDR_ANY_STATE && - ipv6->unicast[i].addr_state != addr_state) || - ipv6->unicast[i].address.family != AF_INET6) { - continue; - } - - if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) { - addr = 
&ipv6->unicast[i].address.in6_addr; - goto out; - } - } - -out: - net_if_unlock(iface); - - return addr; -} - -struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state, - struct net_if **iface) -{ - struct in6_addr *addr = NULL; - - STRUCT_SECTION_FOREACH(net_if, tmp) { - net_if_lock(tmp); - - addr = net_if_ipv6_get_ll(tmp, state); - if (addr) { - if (iface) { - *iface = tmp; - } - - net_if_unlock(tmp); - goto out; - } - - net_if_unlock(tmp); - } - -out: - return addr; -} - -static inline struct in6_addr *check_global_addr(struct net_if *iface, - enum net_addr_state state) -{ - struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6; - - if (!ipv6) { - return NULL; - } - - ARRAY_FOR_EACH(ipv6->unicast, i) { - if (!ipv6->unicast[i].is_used || - (ipv6->unicast[i].addr_state != state) || - ipv6->unicast[i].address.family != AF_INET6) { - continue; - } - - if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) { - return &ipv6->unicast[i].address.in6_addr; - } - } - - return NULL; -} - -struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state, - struct net_if **iface) -{ - struct in6_addr *addr = NULL; - - STRUCT_SECTION_FOREACH(net_if, tmp) { - if (iface && *iface && tmp != *iface) { - continue; - } - - net_if_lock(tmp); - addr = check_global_addr(tmp, state); - if (addr) { - if (iface) { - *iface = tmp; - } - - net_if_unlock(tmp); - goto out; - } - - net_if_unlock(tmp); - } - -out: - - return addr; -} +#endif /* CONFIG_NET_NATIVE_IPV6 */ static uint8_t get_diff_ipv6(const struct in6_addr *src, const struct in6_addr *dst) @@ -3188,6 +3210,8 @@ struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst) return iface; } +#if defined(CONFIG_NET_NATIVE_IPV6) + uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6) { uint32_t min_reachable, max_reachable; @@ -3264,17 +3288,8 @@ static void iface_ipv6_init(int if_count) net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6); } } - -#else /* CONFIG_NET_NATIVE_IPV6 */ -#define 
join_mcast_allnodes(...) -#define join_mcast_solicit_node(...) -#define leave_mcast_all(...) -#define clear_joined_ipv6_mcast_groups(...) -#define join_mcast_nodes(...) -#define iface_ipv6_start(...) -#define iface_ipv6_stop(...) -#define iface_ipv6_init(...) - +#endif /* CONFIG_NET_NATIVE_IPV6 */ +#else /* CONFIG_NET_IPV6 */ struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr, struct net_if **iface) { @@ -3301,9 +3316,18 @@ struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state, return NULL; } -#endif /* CONFIG_NET_NATIVE_IPV6 */ +#endif /* CONFIG_NET_IPV6 */ -#if defined(CONFIG_NET_NATIVE_IPV4) +#if !defined(CONFIG_NET_NATIVE_IPV6) +#define join_mcast_allnodes(...) +#define leave_mcast_all(...) +#define clear_joined_ipv6_mcast_groups(...) +#define iface_ipv6_start(...) +#define iface_ipv6_stop(...) +#define iface_ipv6_init(...) +#endif /* !CONFIG_NET_NATIVE_IPV6 */ + +#if defined(CONFIG_NET_IPV4) int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4) { int ret = 0; @@ -3389,107 +3413,6 @@ int net_if_config_ipv4_put(struct net_if *iface) return ret; } -uint8_t net_if_ipv4_get_ttl(struct net_if *iface) -{ - int ret = 0; - - net_if_lock(iface); - - if (net_if_config_ipv4_get(iface, NULL) < 0) { - goto out; - } - - if (!iface->config.ip.ipv4) { - goto out; - } - - ret = iface->config.ip.ipv4->ttl; -out: - net_if_unlock(iface); - - return ret; -} - -void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl) -{ - net_if_lock(iface); - - if (net_if_config_ipv4_get(iface, NULL) < 0) { - goto out; - } - - if (!iface->config.ip.ipv4) { - goto out; - } - - iface->config.ip.ipv4->ttl = ttl; -out: - net_if_unlock(iface); -} - -uint8_t net_if_ipv4_get_mcast_ttl(struct net_if *iface) -{ - int ret = 0; - - net_if_lock(iface); - - if (net_if_config_ipv4_get(iface, NULL) < 0) { - goto out; - } - - if (!iface->config.ip.ipv4) { - goto out; - } - - ret = iface->config.ip.ipv4->mcast_ttl; -out: - net_if_unlock(iface); - 
- return ret; -} - -void net_if_ipv4_set_mcast_ttl(struct net_if *iface, uint8_t ttl) -{ - net_if_lock(iface); - - if (net_if_config_ipv4_get(iface, NULL) < 0) { - goto out; - } - - if (!iface->config.ip.ipv4) { - goto out; - } - - iface->config.ip.ipv4->mcast_ttl = ttl; -out: - net_if_unlock(iface); -} - -struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface, - struct in_addr *addr) -{ - return iface_router_lookup(iface, AF_INET, addr); -} - -struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface, - struct in_addr *addr) -{ - return iface_router_find_default(iface, AF_INET, addr); -} - -struct net_if_router *net_if_ipv4_router_add(struct net_if *iface, - struct in_addr *addr, - bool is_default, - uint16_t lifetime) -{ - return iface_router_add(iface, AF_INET, addr, is_default, lifetime); -} - -bool net_if_ipv4_router_rm(struct net_if_router *router) -{ - return iface_router_rm(router); -} - bool net_if_ipv4_addr_mask_cmp(struct net_if *iface, const struct in_addr *addr) { @@ -4679,6 +4602,109 @@ void net_if_ipv4_maddr_join(struct net_if *iface, struct net_if_mcast_addr *addr net_if_unlock(iface); } +#if defined(CONFIG_NET_NATIVE_IPV4) +uint8_t net_if_ipv4_get_ttl(struct net_if *iface) +{ + int ret = 0; + + net_if_lock(iface); + + if (net_if_config_ipv4_get(iface, NULL) < 0) { + goto out; + } + + if (!iface->config.ip.ipv4) { + goto out; + } + + ret = iface->config.ip.ipv4->ttl; +out: + net_if_unlock(iface); + + return ret; +} + +void net_if_ipv4_set_ttl(struct net_if *iface, uint8_t ttl) +{ + net_if_lock(iface); + + if (net_if_config_ipv4_get(iface, NULL) < 0) { + goto out; + } + + if (!iface->config.ip.ipv4) { + goto out; + } + + iface->config.ip.ipv4->ttl = ttl; +out: + net_if_unlock(iface); +} + +uint8_t net_if_ipv4_get_mcast_ttl(struct net_if *iface) +{ + int ret = 0; + + net_if_lock(iface); + + if (net_if_config_ipv4_get(iface, NULL) < 0) { + goto out; + } + + if (!iface->config.ip.ipv4) { + goto out; + } + + ret = 
iface->config.ip.ipv4->mcast_ttl; +out: + net_if_unlock(iface); + + return ret; +} + +void net_if_ipv4_set_mcast_ttl(struct net_if *iface, uint8_t ttl) +{ + net_if_lock(iface); + + if (net_if_config_ipv4_get(iface, NULL) < 0) { + goto out; + } + + if (!iface->config.ip.ipv4) { + goto out; + } + + iface->config.ip.ipv4->mcast_ttl = ttl; +out: + net_if_unlock(iface); +} + +struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface, + struct in_addr *addr) +{ + return iface_router_lookup(iface, AF_INET, addr); +} + +struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface, + struct in_addr *addr) +{ + return iface_router_find_default(iface, AF_INET, addr); +} + +struct net_if_router *net_if_ipv4_router_add(struct net_if *iface, + struct in_addr *addr, + bool is_default, + uint16_t lifetime) +{ + return iface_router_add(iface, AF_INET, addr, is_default, lifetime); +} + +bool net_if_ipv4_router_rm(struct net_if_router *router) +{ + return iface_router_rm(router); +} + + static void iface_ipv4_init(int if_count) { int i; @@ -4725,13 +4751,8 @@ static void iface_ipv4_start(struct net_if *iface) net_if_start_acd(iface); } } - - -#else /* CONFIG_NET_NATIVE_IPV4 */ -#define leave_ipv4_mcast_all(...) -#define iface_ipv4_init(...) -#define iface_ipv4_start(...) - +#endif /* CONFIG_NET_NATIVE_IPV4 */ +#else /* CONFIG_NET_IPV4 */ struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr, struct net_if **iface) { @@ -4758,7 +4779,13 @@ struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface, return NULL; } -#endif /* CONFIG_NET_NATIVE_IPV4 */ +#endif /* CONFIG_NET_IPV4 */ + +#if !defined(CONFIG_NET_NATIVE_IPV4) +#define leave_ipv4_mcast_all(...) +#define iface_ipv4_init(...) +#define iface_ipv4_start(...) 
+#endif /* !CONFIG_NET_NATIVE_IPV4 */ struct net_if *net_if_select_src_iface(const struct sockaddr *dst) { @@ -4797,12 +4824,15 @@ static struct net_if_addr *get_ifaddr(struct net_if *iface, if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) { struct net_if_ipv6 *ipv6 = - COND_CODE_1(CONFIG_NET_NATIVE_IPV6, (iface->config.ip.ipv6), (NULL)); - + COND_CODE_1(CONFIG_NET_IPV6, (iface->config.ip.ipv6), (NULL)); struct in6_addr maddr; unsigned int maddr_count = 0; int found = -1; + if (ipv6 == NULL) { + goto out; + } + net_ipv6_addr_create_solicited_node((struct in6_addr *)addr, &maddr); @@ -4844,7 +4874,11 @@ static struct net_if_addr *get_ifaddr(struct net_if *iface, if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) { struct net_if_ipv4 *ipv4 = - COND_CODE_1(CONFIG_NET_NATIVE_IPV4, (iface->config.ip.ipv4), (NULL)); + COND_CODE_1(CONFIG_NET_IPV4, (iface->config.ip.ipv4), (NULL)); + + if (ipv4 == NULL) { + goto out; + } ARRAY_FOR_EACH(ipv4->unicast, i) { if (!ipv4->unicast[i].ipv4.is_used) { @@ -4876,7 +4910,7 @@ static void remove_ipv6_ifaddr(struct net_if *iface, net_if_lock(iface); - ipv6 = COND_CODE_1(CONFIG_NET_NATIVE_IPV6, (iface->config.ip.ipv6), (NULL)); + ipv6 = COND_CODE_1(CONFIG_NET_IPV6, (iface->config.ip.ipv6), (NULL)); if (!ipv6) { goto out; } @@ -4934,7 +4968,7 @@ static void remove_ipv4_ifaddr(struct net_if *iface, net_if_lock(iface); - ipv4 = COND_CODE_1(CONFIG_NET_NATIVE_IPV4, (iface->config.ip.ipv4), (NULL)); + ipv4 = COND_CODE_1(CONFIG_NET_IPV4, (iface->config.ip.ipv4), (NULL)); if (!ipv4) { goto out; } diff --git a/subsys/net/ip/net_private.h b/subsys/net/ip/net_private.h index 0bbcbb231649..04c3205f5aec 100644 --- a/subsys/net/ip/net_private.h +++ b/subsys/net/ip/net_private.h @@ -86,16 +86,12 @@ extern bool net_context_is_v6only_set(struct net_context *context); extern bool net_context_is_recv_pktinfo_set(struct net_context *context); extern bool net_context_is_timestamping_set(struct net_context *context); extern void net_pkt_init(void); 
-extern void net_tc_tx_init(void); -extern void net_tc_rx_init(void); int net_context_get_local_addr(struct net_context *context, struct sockaddr *addr, socklen_t *addrlen); #else static inline void net_context_init(void) { } static inline void net_pkt_init(void) { } -static inline void net_tc_tx_init(void) { } -static inline void net_tc_rx_init(void) { } static inline const char *net_context_state(struct net_context *context) { ARG_UNUSED(context); @@ -149,6 +145,8 @@ static inline void mdns_init_responder(void) { } #if defined(CONFIG_NET_NATIVE) enum net_verdict net_ipv4_input(struct net_pkt *pkt, bool is_loopback); enum net_verdict net_ipv6_input(struct net_pkt *pkt, bool is_loopback); +extern void net_tc_tx_init(void); +extern void net_tc_rx_init(void); #else static inline enum net_verdict net_ipv4_input(struct net_pkt *pkt, bool is_loopback) @@ -167,6 +165,9 @@ static inline enum net_verdict net_ipv6_input(struct net_pkt *pkt, return NET_CONTINUE; } + +static inline void net_tc_tx_init(void) { } +static inline void net_tc_rx_init(void) { } #endif extern bool net_tc_submit_to_tx_queue(uint8_t tc, struct net_pkt *pkt); extern void net_tc_submit_to_rx_queue(uint8_t tc, struct net_pkt *pkt); diff --git a/subsys/net/ip/utils.c b/subsys/net/ip/utils.c index e8b6b99b073b..f130a184bcb2 100644 --- a/subsys/net/ip/utils.c +++ b/subsys/net/ip/utils.c @@ -634,7 +634,7 @@ static inline uint16_t pkt_calc_chksum(struct net_pkt *pkt, uint16_t sum) return sum; } -#if defined(CONFIG_NET_IP) +#if defined(CONFIG_NET_NATIVE_IP) uint16_t net_calc_chksum(struct net_pkt *pkt, uint8_t proto) { size_t len = 0U; @@ -684,7 +684,7 @@ uint16_t net_calc_chksum(struct net_pkt *pkt, uint8_t proto) } #endif -#if defined(CONFIG_NET_IPV4) +#if defined(CONFIG_NET_NATIVE_IPV4) uint16_t net_calc_chksum_ipv4(struct net_pkt *pkt) { uint16_t sum; @@ -697,7 +697,7 @@ uint16_t net_calc_chksum_ipv4(struct net_pkt *pkt) return ~sum; } -#endif /* CONFIG_NET_IPV4 */ +#endif /* CONFIG_NET_NATIVE_IPV4 */ #if 
defined(CONFIG_NET_IPV4_IGMP) uint16_t net_calc_chksum_igmp(struct net_pkt *pkt) diff --git a/subsys/net/lib/dns/llmnr_responder.c b/subsys/net/lib/dns/llmnr_responder.c index 21832efdf670..133fce7ba60c 100644 --- a/subsys/net/lib/dns/llmnr_responder.c +++ b/subsys/net/lib/dns/llmnr_responder.c @@ -21,6 +21,7 @@ LOG_MODULE_REGISTER(net_llmnr_responder, CONFIG_LLMNR_RESPONDER_LOG_LEVEL); #include #include +#include #include #include #include diff --git a/subsys/net/lib/dns/mdns_responder.c b/subsys/net/lib/dns/mdns_responder.c index 43b07d694c52..93b620160900 100644 --- a/subsys/net/lib/dns/mdns_responder.c +++ b/subsys/net/lib/dns/mdns_responder.c @@ -22,6 +22,7 @@ LOG_MODULE_REGISTER(net_mdns_responder, CONFIG_MDNS_RESPONDER_LOG_LEVEL); #include #include +#include #include #include #include diff --git a/subsys/net/lib/shell/iface.c b/subsys/net/lib/shell/iface.c index 7aa03c757306..c3a3428a546d 100644 --- a/subsys/net/lib/shell/iface.c +++ b/subsys/net/lib/shell/iface.c @@ -91,7 +91,6 @@ static void print_phy_link_state(const struct shell *sh, const struct device *ph } #endif -#if defined(CONFIG_NET_NATIVE) static const char *iface_flags2str(struct net_if *iface) { static char str[sizeof("POINTOPOINT") + sizeof("PROMISC") + @@ -148,17 +147,17 @@ static const char *iface_flags2str(struct net_if *iface) return str; } -#endif static void iface_cb(struct net_if *iface, void *user_data) { -#if defined(CONFIG_NET_NATIVE) struct net_shell_user_data *data = user_data; const struct shell *sh = data->sh; -#if defined(CONFIG_NET_IPV6) +#if defined(CONFIG_NET_NATIVE_IPV6) struct net_if_ipv6_prefix *prefix; struct net_if_router *router; +#endif +#if defined(CONFIG_NET_IPV6) struct net_if_ipv6 *ipv6; #endif #if defined(CONFIG_NET_IPV4) @@ -393,6 +392,7 @@ static void iface_cb(struct net_if *iface, void *user_data) PR("\t\n"); } +#if defined(CONFIG_NET_NATIVE_IPV6) count = 0; PR("IPv6 prefixes (max %d):\n", NET_IF_MAX_IPV6_PREFIX); @@ -421,6 +421,7 @@ static void iface_cb(struct 
net_if *iface, void *user_data) net_sprint_ipv6_addr(&router->address.in6_addr), router->is_infinite ? " infinite" : ""); } +#endif /* CONFIG_NET_NATIVE_IPV6 */ skip_ipv6: @@ -532,12 +533,6 @@ static void iface_cb(struct net_if *iface, void *user_data) iface->config.dhcpv4.attempts); } #endif /* CONFIG_NET_DHCPV4 */ - -#else - ARG_UNUSED(iface); - ARG_UNUSED(user_data); - -#endif /* CONFIG_NET_NATIVE */ } static int cmd_net_set_mac(const struct shell *sh, size_t argc, char *argv[]) diff --git a/subsys/net/lib/shell/ipv4.c b/subsys/net/lib/shell/ipv4.c index 74afea9a54ac..d70dc786828b 100644 --- a/subsys/net/lib/shell/ipv4.c +++ b/subsys/net/lib/shell/ipv4.c @@ -13,7 +13,7 @@ LOG_MODULE_DECLARE(net_shell); #include "net_shell_private.h" #include "../ip/ipv4.h" -#if defined(CONFIG_NET_NATIVE_IPV4) +#if defined(CONFIG_NET_IPV4) static void ip_address_lifetime_cb(struct net_if *iface, void *user_data) { struct net_shell_user_data *data = user_data; @@ -50,7 +50,7 @@ static void ip_address_lifetime_cb(struct net_if *iface, void *user_data) &ipv4->unicast[i].netmask)); } } -#endif /* CONFIG_NET_NATIVE_IPV4 */ +#endif /* CONFIG_NET_IPV4 */ static int cmd_net_ipv4(const struct shell *sh, size_t argc, char *argv[]) { @@ -62,14 +62,17 @@ static int cmd_net_ipv4(const struct shell *sh, size_t argc, char *argv[]) } #if defined(CONFIG_NET_NATIVE_IPV4) - struct net_shell_user_data user_data; - PR("IPv4 fragmentation support : %s\n", IS_ENABLED(CONFIG_NET_IPV4_FRAGMENT) ? "enabled" : "disabled"); PR("IPv4 conflict detection support : %s\n", IS_ENABLED(CONFIG_NET_IPV4_ACD) ? 
"enabled" : "disabled"); +#endif /* CONFIG_NET_NATIVE_IPV4 */ + +#if defined(CONFIG_NET_IPV4) + struct net_shell_user_data user_data; + PR("Max number of IPv4 network interfaces " "in the system : %d\n", CONFIG_NET_IF_MAX_IPV4_COUNT); @@ -85,14 +88,14 @@ static int cmd_net_ipv4(const struct shell *sh, size_t argc, char *argv[]) /* Print information about address lifetime */ net_if_foreach(ip_address_lifetime_cb, &user_data); -#endif +#endif /* CONFIG_NET_IPV4 */ return 0; } static int cmd_net_ip_add(const struct shell *sh, size_t argc, char *argv[]) { -#if defined(CONFIG_NET_NATIVE_IPV4) +#if defined(CONFIG_NET_IPV4) struct net_if *iface = NULL; int idx; struct in_addr addr; @@ -151,16 +154,15 @@ static int cmd_net_ip_add(const struct shell *sh, size_t argc, char *argv[]) net_if_ipv4_set_netmask_by_addr(iface, &addr, &netmask); } -#else /* CONFIG_NET_NATIVE_IPV4 */ - PR_INFO("Set %s and %s to enable native %s support.\n", - "CONFIG_NET_NATIVE", "CONFIG_NET_IPV4", "IPv4"); -#endif /* CONFIG_NET_NATIVE_IPV4 */ +#else /* CONFIG_NET_IPV4 */ + PR_INFO("Set %s to enable %s support.\n", "CONFIG_NET_IPV4", "IPv4"); +#endif /* CONFIG_NET_IPV4 */ return 0; } static int cmd_net_ip_del(const struct shell *sh, size_t argc, char *argv[]) { -#if defined(CONFIG_NET_NATIVE_IPV4) +#if defined(CONFIG_NET_IPV4) struct net_if *iface = NULL; int idx; struct in_addr addr; @@ -201,16 +203,15 @@ static int cmd_net_ip_del(const struct shell *sh, size_t argc, char *argv[]) return -ENOEXEC; } } -#else /* CONFIG_NET_NATIVE_IPV4 */ - PR_INFO("Set %s and %s to enable native %s support.\n", - "CONFIG_NET_NATIVE", "CONFIG_NET_IPV4", "IPv4"); -#endif /* CONFIG_NET_NATIVE_IPV4 */ +#else /* CONFIG_NET_IPV4 */ + PR_INFO("Set %s to enable %s support.\n", "CONFIG_NET_IPV4", "IPv4"); +#endif /* CONFIG_NET_IPV4 */ return 0; } static int cmd_net_ip_gateway(const struct shell *sh, size_t argc, char *argv[]) { -#if defined(CONFIG_NET_NATIVE_IPV4) +#if defined(CONFIG_NET_IPV4) struct net_if *iface; int idx; 
struct in_addr addr; @@ -238,10 +239,9 @@ static int cmd_net_ip_gateway(const struct shell *sh, size_t argc, char *argv[]) net_if_ipv4_set_gw(iface, &addr); -#else /* CONFIG_NET_NATIVE_IPV4 */ - PR_INFO("Set %s and %s to enable native %s support.\n", - "CONFIG_NET_NATIVE", "CONFIG_NET_IPV4", "IPv4"); -#endif /* CONFIG_NET_NATIVE_IPV4 */ +#else /* CONFIG_NET_IPV4 */ + PR_INFO("Set %s to enable %s support.\n", "CONFIG_NET_IPV4", "IPv4"); +#endif /* CONFIG_NET_IPV4 */ return 0; } diff --git a/subsys/net/lib/shell/ipv6.c b/subsys/net/lib/shell/ipv6.c index b9a9252f1acc..eb65185c03ca 100644 --- a/subsys/net/lib/shell/ipv6.c +++ b/subsys/net/lib/shell/ipv6.c @@ -8,6 +8,8 @@ #include LOG_MODULE_DECLARE(net_shell); +#include + #include "net_shell_private.h" #include "../ip/ipv6.h" @@ -54,8 +56,6 @@ void ipv6_frag_cb(struct net_ipv6_reassembly *reass, void *user_data) } #endif /* CONFIG_NET_IPV6_FRAGMENT */ -#if defined(CONFIG_NET_NATIVE_IPV6) - #if defined(CONFIG_NET_IPV6_PE) static void ipv6_pe_filter_cb(struct in6_addr *prefix, bool is_denylist, void *user_data) @@ -78,6 +78,7 @@ static void ipv6_pe_filter_cb(struct in6_addr *prefix, bool is_denylist, } #endif /* CONFIG_NET_IPV6_PE */ +#if defined(CONFIG_NET_IPV6) static void address_lifetime_cb(struct net_if *iface, void *user_data) { struct net_shell_user_data *data = user_data; @@ -99,16 +100,18 @@ static void address_lifetime_cb(struct net_if *iface, void *user_data) PR("Type \tState \tLifetime (sec)\tRef\tAddress\n"); ARRAY_FOR_EACH(ipv6->unicast, i) { - struct net_if_ipv6_prefix *prefix; char remaining_str[sizeof("01234567890")]; - uint32_t remaining; - uint8_t prefix_len; + uint8_t prefix_len = 128U; if (!ipv6->unicast[i].is_used || ipv6->unicast[i].address.family != AF_INET6) { continue; } +#if defined(CONFIG_NET_NATIVE_IPV6) + struct net_if_ipv6_prefix *prefix; + uint32_t remaining; + remaining = net_timeout_remaining(&ipv6->unicast[i].lifetime, k_uptime_get_32()); @@ -116,8 +119,6 @@ static void 
address_lifetime_cb(struct net_if *iface, void *user_data) &ipv6->unicast[i].address.in6_addr); if (prefix) { prefix_len = prefix->len; - } else { - prefix_len = 128U; } if (ipv6->unicast[i].is_infinite) { @@ -127,6 +128,9 @@ static void address_lifetime_cb(struct net_if *iface, void *user_data) snprintk(remaining_str, sizeof(remaining_str) - 1, "%u", remaining); } +#else + snprintk(remaining_str, sizeof(remaining_str) - 1, "infinite"); +#endif /* CONFIG_NET_NATIVE_IPV6 */ PR("%s \t%s\t%14s\t%ld\t%s/%d%s\n", addrtype2str(ipv6->unicast[i].addr_type), @@ -137,13 +141,13 @@ static void address_lifetime_cb(struct net_if *iface, void *user_data) ipv6->unicast[i].is_temporary ? " (temporary)" : ""); } } -#endif /* CONFIG_NET_NATIVE_IPV6 */ +#endif /* CONFIG_NET_IPV6 */ static int cmd_net_ipv6(const struct shell *sh, size_t argc, char *argv[]) { -#if defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IPV6) struct net_shell_user_data user_data; -#endif +#endif /* CONFIG_NET_IPV6 */ PR("IPv6 support : %s\n", IS_ENABLED(CONFIG_NET_IPV6) ? 
@@ -189,8 +193,10 @@ static int cmd_net_ipv6(const struct shell *sh, size_t argc, char *argv[]) PR("Max number of IPv6 privacy extension filters " " : %d\n", CONFIG_NET_IPV6_PE_FILTER_PREFIX_COUNT); -#endif +#endif /* CONFIG_NET_IPV6_PE */ +#endif /* CONFIG_NET_NATIVE_IPV6 */ +#if defined(CONFIG_NET_IPV6) PR("Max number of IPv6 network interfaces " "in the system : %d\n", CONFIG_NET_IF_MAX_IPV6_COUNT); @@ -209,15 +215,14 @@ static int cmd_net_ipv6(const struct shell *sh, size_t argc, char *argv[]) /* Print information about address lifetime */ net_if_foreach(address_lifetime_cb, &user_data); - -#endif /* CONFIG_NET_NATIVE_IPV6 */ +#endif /* CONFIG_NET_IPV6 */ return 0; } static int cmd_net_ip6_add(const struct shell *sh, size_t argc, char *argv[]) { -#if defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IPV6) struct net_if *iface = NULL; int idx; struct in6_addr addr; @@ -262,16 +267,15 @@ static int cmd_net_ip6_add(const struct shell *sh, size_t argc, char *argv[]) } } -#else /* CONFIG_NET_NATIVE_IPV6 */ - PR_INFO("Set %s and %s to enable native %s support.\n", - "CONFIG_NET_NATIVE", "CONFIG_NET_IPV6", "IPv6"); -#endif /* CONFIG_NET_NATIVE_IPV6 */ +#else /* CONFIG_NET_IPV6 */ + PR_INFO("Set %s to enable %s support.\n", "CONFIG_NET_IPV6", "IPv6"); +#endif /* CONFIG_NET_IPV6 */ return 0; } static int cmd_net_ip6_del(const struct shell *sh, size_t argc, char *argv[]) { -#if defined(CONFIG_NET_NATIVE_IPV6) +#if defined(CONFIG_NET_IPV6) struct net_if *iface = NULL; int idx; struct in6_addr addr; @@ -317,10 +321,9 @@ static int cmd_net_ip6_del(const struct shell *sh, size_t argc, char *argv[]) } } -#else /* CONFIG_NET_NATIVE_IPV6 */ - PR_INFO("Set %s and %s to enable native %s support.\n", - "CONFIG_NET_NATIVE", "CONFIG_NET_IPV6", "IPv6"); -#endif /* CONFIG_NET_NATIVE_IPV6 */ +#else /* CONFIG_NET_IPV6 */ + PR_INFO("Set %s to enable %s support.\n", "CONFIG_NET_IPV6", "IPv6"); +#endif /* CONFIG_NET_IPV6 */ return 0; } diff --git 
a/subsys/net/lib/sockets/CMakeLists.txt b/subsys/net/lib/sockets/CMakeLists.txt index 253cb4a182f8..28b76df4a7af 100644 --- a/subsys/net/lib/sockets/CMakeLists.txt +++ b/subsys/net/lib/sockets/CMakeLists.txt @@ -20,6 +20,7 @@ zephyr_library_sources( ) endif() +zephyr_library_sources_ifdef(CONFIG_NET_NATIVE sockets_inet.c) zephyr_library_sources_ifdef(CONFIG_NET_SOCKETS_CAN sockets_can.c) zephyr_library_sources_ifdef(CONFIG_NET_SOCKETS_PACKET sockets_packet.c) zephyr_library_sources_ifdef(CONFIG_NET_SOCKETS_SOCKOPT_TLS sockets_tls.c) diff --git a/subsys/net/lib/sockets/sockets.c b/subsys/net/lib/sockets/sockets.c index 12fbce5c6c52..be7ba5ad9678 100644 --- a/subsys/net/lib/sockets/sockets.c +++ b/subsys/net/lib/sockets/sockets.c @@ -11,32 +11,11 @@ LOG_MODULE_REGISTER(net_sock, CONFIG_NET_SOCKETS_LOG_LEVEL); #include -#include -#include #include #include -#include -#include #include -#include -#include -#include - -#if defined(CONFIG_SOCKS) -#include "socks.h" -#endif - -#include -#include "../../ip/ipv6.h" - -#include "../../ip/net_stats.h" #include "sockets_internal.h" -#include "../../ip/tcp_internal.h" -#include "../../ip/net_private.h" - -#define SET_ERRNO(x) \ - { int _err = x; if (_err < 0) { errno = -_err; return -1; } } #define VTABLE_CALL(fn, sock, ...) 
\ ({ \ @@ -65,8 +44,6 @@ LOG_MODULE_REGISTER(net_sock, CONFIG_NET_SOCKETS_LOG_LEVEL); retval; \ }) -const struct socket_op_vtable sock_fd_op_vtable; - static inline void *get_sock_vtable(int sock, const struct socket_op_vtable **vtable, struct k_mutex **lock) @@ -97,6 +74,19 @@ static inline void *get_sock_vtable(int sock, return ctx; } +size_t msghdr_non_empty_iov_count(const struct msghdr *msg) +{ + size_t non_empty_iov_count = 0; + + for (size_t i = 0; i < msg->msg_iovlen; i++) { + if (msg->msg_iov[i].iov_len) { + non_empty_iov_count++; + } + } + + return non_empty_iov_count; +} + void *z_impl_zsock_get_context_object(int sock) { const struct socket_op_vtable *ignored; @@ -114,108 +104,6 @@ void *z_vrfy_zsock_get_context_object(int sock) #include #endif -static void zsock_received_cb(struct net_context *ctx, - struct net_pkt *pkt, - union net_ip_header *ip_hdr, - union net_proto_header *proto_hdr, - int status, - void *user_data); - -static int fifo_wait_non_empty(struct k_fifo *fifo, k_timeout_t timeout) -{ - struct k_poll_event events[] = { - K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE, - K_POLL_MODE_NOTIFY_ONLY, fifo), - }; - - return k_poll(events, ARRAY_SIZE(events), timeout); -} - -static void zsock_flush_queue(struct net_context *ctx) -{ - bool is_listen = net_context_get_state(ctx) == NET_CONTEXT_LISTENING; - void *p; - - /* recv_q and accept_q are shared via a union */ - while ((p = k_fifo_get(&ctx->recv_q, K_NO_WAIT)) != NULL) { - if (is_listen) { - NET_DBG("discarding ctx %p", p); - net_context_put(p); - } else { - NET_DBG("discarding pkt %p", p); - net_pkt_unref(p); - } - } - - /* Some threads might be waiting on recv, cancel the wait */ - k_fifo_cancel_wait(&ctx->recv_q); - - /* Wake reader if it was sleeping */ - (void)k_condvar_signal(&ctx->cond.recv); -} - -#if defined(CONFIG_NET_NATIVE) -static int zsock_socket_internal(int family, int type, int proto) -{ - int fd = zvfs_reserve_fd(); - struct net_context *ctx; - int res; - - if (fd 
< 0) { - return -1; - } - - if (proto == 0) { - if (family == AF_INET || family == AF_INET6) { - if (type == SOCK_DGRAM) { - proto = IPPROTO_UDP; - } else if (type == SOCK_STREAM) { - proto = IPPROTO_TCP; - } - } - } - - res = net_context_get(family, type, proto, &ctx); - if (res < 0) { - zvfs_free_fd(fd); - errno = -res; - return -1; - } - - /* Initialize user_data, all other calls will preserve it */ - ctx->user_data = NULL; - - /* The socket flags are stored here */ - ctx->socket_data = NULL; - - /* recv_q and accept_q are in union */ - k_fifo_init(&ctx->recv_q); - - /* Condition variable is used to avoid keeping lock for a long time - * when waiting data to be received - */ - k_condvar_init(&ctx->cond.recv); - - /* TCP context is effectively owned by both application - * and the stack: stack may detect that peer closed/aborted - * connection, but it must not dispose of the context behind - * the application back. Likewise, when application "closes" - * context, it's not disposed of immediately - there's yet - * closing handshake for stack to perform. - */ - if (proto == IPPROTO_TCP) { - net_context_ref(ctx); - } - - zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable, - ZVFS_MODE_IFSOCK); - - NET_DBG("socket: ctx=%p, fd=%d", ctx, fd); - - return fd; -} -#endif /* CONFIG_NET_NATIVE */ - int z_impl_zsock_socket(int family, int type, int proto) { STRUCT_SECTION_FOREACH(net_socket_register, sock_family) { @@ -259,29 +147,6 @@ static inline int z_vrfy_zsock_socket(int family, int type, int proto) #include #endif /* CONFIG_USERSPACE */ -int zsock_close_ctx(struct net_context *ctx) -{ - /* Reset callbacks to avoid any race conditions while - * flushing queues. No need to check return values here, - * as these are fail-free operations and we're closing - * socket anyway. 
- */ - if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) { - (void)net_context_accept(ctx, NULL, K_NO_WAIT, NULL); - } else { - (void)net_context_recv(ctx, NULL, K_NO_WAIT, NULL); - } - - ctx->user_data = INT_TO_POINTER(EINTR); - sock_set_error(ctx); - - zsock_flush_queue(ctx); - - SET_ERRNO(net_context_put(ctx)); - - return 0; -} - int z_impl_zsock_close(int sock) { const struct socket_op_vtable *vtable; @@ -366,128 +231,6 @@ static inline int z_vrfy_zsock_shutdown(int sock, int how) #include #endif /* CONFIG_USERSPACE */ -static void zsock_accepted_cb(struct net_context *new_ctx, - struct sockaddr *addr, socklen_t addrlen, - int status, void *user_data) { - struct net_context *parent = user_data; - - NET_DBG("parent=%p, ctx=%p, st=%d", parent, new_ctx, status); - - if (status == 0) { - /* This just installs a callback, so cannot fail. */ - (void)net_context_recv(new_ctx, zsock_received_cb, K_NO_WAIT, - NULL); - k_fifo_init(&new_ctx->recv_q); - k_condvar_init(&new_ctx->cond.recv); - - k_fifo_put(&parent->accept_q, new_ctx); - - /* TCP context is effectively owned by both application - * and the stack: stack may detect that peer closed/aborted - * connection, but it must not dispose of the context behind - * the application back. Likewise, when application "closes" - * context, it's not disposed of immediately - there's yet - * closing handshake for stack to perform. 
- */ - net_context_ref(new_ctx); - - (void)k_condvar_signal(&parent->cond.recv); - } - -} - -static void zsock_received_cb(struct net_context *ctx, - struct net_pkt *pkt, - union net_ip_header *ip_hdr, - union net_proto_header *proto_hdr, - int status, - void *user_data) -{ - if (ctx->cond.lock) { - (void)k_mutex_lock(ctx->cond.lock, K_FOREVER); - } - - NET_DBG("ctx=%p, pkt=%p, st=%d, user_data=%p", ctx, pkt, status, - user_data); - - if (status < 0) { - ctx->user_data = INT_TO_POINTER(-status); - sock_set_error(ctx); - } - - /* if pkt is NULL, EOF */ - if (!pkt) { - struct net_pkt *last_pkt = k_fifo_peek_tail(&ctx->recv_q); - - if (!last_pkt) { - /* If there're no packets in the queue, recv() may - * be blocked waiting on it to become non-empty, - * so cancel that wait. - */ - sock_set_eof(ctx); - k_fifo_cancel_wait(&ctx->recv_q); - NET_DBG("Marked socket %p as peer-closed", ctx); - } else { - net_pkt_set_eof(last_pkt, true); - NET_DBG("Set EOF flag on pkt %p", last_pkt); - } - - goto unlock; - } - - /* Normal packet */ - net_pkt_set_eof(pkt, false); - - net_pkt_set_rx_stats_tick(pkt, k_cycle_get_32()); - - k_fifo_put(&ctx->recv_q, pkt); - -unlock: - /* Wake reader if it was sleeping */ - (void)k_condvar_signal(&ctx->cond.recv); - - if (ctx->cond.lock) { - (void)k_mutex_unlock(ctx->cond.lock); - } -} - -int zsock_shutdown_ctx(struct net_context *ctx, int how) -{ - if (how == ZSOCK_SHUT_RD) { - if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) { - SET_ERRNO(net_context_accept(ctx, NULL, K_NO_WAIT, NULL)); - } else { - SET_ERRNO(net_context_recv(ctx, NULL, K_NO_WAIT, NULL)); - } - - sock_set_eof(ctx); - - zsock_flush_queue(ctx); - } else if (how == ZSOCK_SHUT_WR || how == ZSOCK_SHUT_RDWR) { - SET_ERRNO(-ENOTSUP); - } else { - SET_ERRNO(-EINVAL); - } - - return 0; -} - -int zsock_bind_ctx(struct net_context *ctx, const struct sockaddr *addr, - socklen_t addrlen) -{ - SET_ERRNO(net_context_bind(ctx, addr, addrlen)); - /* For DGRAM socket, we expect to receive 
packets after call to - * bind(), but for STREAM socket, next expected operation is - * listen(), which doesn't work if recv callback is set. - */ - if (net_context_get_type(ctx) == SOCK_DGRAM) { - SET_ERRNO(net_context_recv(ctx, zsock_received_cb, K_NO_WAIT, - ctx->user_data)); - } - - return 0; -} - int z_impl_zsock_bind(int sock, const struct sockaddr *addr, socklen_t addrlen) { int ret; @@ -516,64 +259,6 @@ static inline int z_vrfy_zsock_bind(int sock, const struct sockaddr *addr, #include #endif /* CONFIG_USERSPACE */ -static void zsock_connected_cb(struct net_context *ctx, int status, void *user_data) -{ - if (status < 0) { - ctx->user_data = INT_TO_POINTER(-status); - sock_set_error(ctx); - } -} - -int zsock_connect_ctx(struct net_context *ctx, const struct sockaddr *addr, - socklen_t addrlen) -{ - -#if defined(CONFIG_SOCKS) - if (net_context_is_proxy_enabled(ctx)) { - SET_ERRNO(net_socks5_connect(ctx, addr, addrlen)); - SET_ERRNO(net_context_recv(ctx, zsock_received_cb, - K_NO_WAIT, ctx->user_data)); - return 0; - } -#endif - if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED) { - return 0; - } else if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) { - if (sock_is_error(ctx)) { - SET_ERRNO(-POINTER_TO_INT(ctx->user_data)); - } else { - SET_ERRNO(-EALREADY); - } - } else { - k_timeout_t timeout = K_MSEC(CONFIG_NET_SOCKETS_CONNECT_TIMEOUT); - net_context_connect_cb_t cb = NULL; - - if (sock_is_nonblock(ctx)) { - timeout = K_NO_WAIT; - cb = zsock_connected_cb; - } - - if (net_context_get_type(ctx) == SOCK_STREAM) { - /* For STREAM sockets net_context_recv() only installs - * recv callback w/o side effects, and it has to be done - * first to avoid race condition, when TCP stream data - * arrives right after connect. 
- */ - SET_ERRNO(net_context_recv(ctx, zsock_received_cb, - K_NO_WAIT, ctx->user_data)); - SET_ERRNO(net_context_connect(ctx, addr, addrlen, cb, - timeout, ctx->user_data)); - } else { - SET_ERRNO(net_context_connect(ctx, addr, addrlen, cb, - timeout, ctx->user_data)); - SET_ERRNO(net_context_recv(ctx, zsock_received_cb, - K_NO_WAIT, ctx->user_data)); - } - } - - return 0; -} - int z_impl_zsock_connect(int sock, const struct sockaddr *addr, socklen_t addrlen) { @@ -603,14 +288,6 @@ int z_vrfy_zsock_connect(int sock, const struct sockaddr *addr, #include #endif /* CONFIG_USERSPACE */ -int zsock_listen_ctx(struct net_context *ctx, int backlog) -{ - SET_ERRNO(net_context_listen(ctx, backlog)); - SET_ERRNO(net_context_accept(ctx, zsock_accepted_cb, K_NO_WAIT, ctx)); - - return 0; -} - int z_impl_zsock_listen(int sock, int backlog) { int ret; @@ -632,91 +309,6 @@ static inline int z_vrfy_zsock_listen(int sock, int backlog) #include #endif /* CONFIG_USERSPACE */ -int zsock_accept_ctx(struct net_context *parent, struct sockaddr *addr, - socklen_t *addrlen) -{ - struct net_context *ctx; - struct net_pkt *last_pkt; - int fd, ret; - - if (!sock_is_nonblock(parent)) { - k_timeout_t timeout = K_FOREVER; - - /* accept() can reuse zsock_wait_data(), as underneath it's - * monitoring the same queue (accept_q is an alias for recv_q). 
- */ - ret = zsock_wait_data(parent, &timeout); - if (ret < 0) { - errno = -ret; - return -1; - } - } - - ctx = k_fifo_get(&parent->accept_q, K_NO_WAIT); - if (ctx == NULL) { - errno = EAGAIN; - return -1; - } - - fd = zvfs_reserve_fd(); - if (fd < 0) { - zsock_flush_queue(ctx); - net_context_put(ctx); - return -1; - } - - /* Check if the connection is already disconnected */ - last_pkt = k_fifo_peek_tail(&ctx->recv_q); - if (last_pkt) { - if (net_pkt_eof(last_pkt)) { - sock_set_eof(ctx); - zvfs_free_fd(fd); - zsock_flush_queue(ctx); - net_context_put(ctx); - errno = ECONNABORTED; - return -1; - } - } - - if (net_context_is_closing(ctx)) { - errno = ECONNABORTED; - zvfs_free_fd(fd); - zsock_flush_queue(ctx); - net_context_put(ctx); - return -1; - } - - net_context_set_accepting(ctx, false); - - - if (addr != NULL && addrlen != NULL) { - int len = MIN(*addrlen, sizeof(ctx->remote)); - - memcpy(addr, &ctx->remote, len); - /* addrlen is a value-result argument, set to actual - * size of source address - */ - if (ctx->remote.sa_family == AF_INET) { - *addrlen = sizeof(struct sockaddr_in); - } else if (ctx->remote.sa_family == AF_INET6) { - *addrlen = sizeof(struct sockaddr_in6); - } else { - zvfs_free_fd(fd); - errno = ENOTSUP; - zsock_flush_queue(ctx); - net_context_put(ctx); - return -1; - } - } - - NET_DBG("accept: ctx=%p, fd=%d", ctx, fd); - - zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable, - ZVFS_MODE_IFSOCK); - - return fd; -} - int z_impl_zsock_accept(int sock, struct sockaddr *addr, socklen_t *addrlen) { int new_sock; @@ -755,142 +347,6 @@ static inline int z_vrfy_zsock_accept(int sock, struct sockaddr *addr, #include #endif /* CONFIG_USERSPACE */ -#define WAIT_BUFS_INITIAL_MS 10 -#define WAIT_BUFS_MAX_MS 100 -#define MAX_WAIT_BUFS K_MSEC(CONFIG_NET_SOCKET_MAX_SEND_WAIT) - -static int send_check_and_wait(struct net_context *ctx, int status, - k_timepoint_t buf_timeout, k_timeout_t timeout, - uint32_t *retry_timeout) -{ - if 
(K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { - goto out; - } - - if (status != -ENOBUFS && status != -EAGAIN) { - goto out; - } - - /* If we cannot get any buffers in reasonable - * amount of time, then do not wait forever as - * there might be some bigger issue. - * If we get -EAGAIN and cannot recover, then - * it means that the sending window is blocked - * and we just cannot send anything. - */ - if (sys_timepoint_expired(buf_timeout)) { - if (status == -ENOBUFS) { - status = -ENOMEM; - } else { - status = -ENOBUFS; - } - - goto out; - } - - if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) { - *retry_timeout = - MIN(*retry_timeout, k_ticks_to_ms_floor32(timeout.ticks)); - } - - if (ctx->cond.lock) { - (void)k_mutex_unlock(ctx->cond.lock); - } - - if (status == -ENOBUFS) { - /* We can monitor net_pkt/net_buf availability, so just wait. */ - k_sleep(K_MSEC(*retry_timeout)); - } - - if (status == -EAGAIN) { - if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) && - net_context_get_type(ctx) == SOCK_STREAM && - !net_if_is_ip_offloaded(net_context_get_iface(ctx))) { - struct k_poll_event event; - - k_poll_event_init(&event, - K_POLL_TYPE_SEM_AVAILABLE, - K_POLL_MODE_NOTIFY_ONLY, - net_tcp_tx_sem_get(ctx)); - - k_poll(&event, 1, K_MSEC(*retry_timeout)); - } else { - k_sleep(K_MSEC(*retry_timeout)); - } - } - /* Exponentially increase the retry timeout - * Cap the value to WAIT_BUFS_MAX_MS - */ - *retry_timeout = MIN(WAIT_BUFS_MAX_MS, *retry_timeout << 1); - - if (ctx->cond.lock) { - (void)k_mutex_lock(ctx->cond.lock, K_FOREVER); - } - - return 0; - -out: - errno = -status; - return -1; -} - -ssize_t zsock_sendto_ctx(struct net_context *ctx, const void *buf, size_t len, - int flags, - const struct sockaddr *dest_addr, socklen_t addrlen) -{ - k_timeout_t timeout = K_FOREVER; - uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS; - k_timepoint_t buf_timeout, end; - int status; - - if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) { - timeout = K_NO_WAIT; - buf_timeout = 
sys_timepoint_calc(K_NO_WAIT); - } else { - net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL); - buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS); - } - end = sys_timepoint_calc(timeout); - - /* Register the callback before sending in order to receive the response - * from the peer. - */ - status = net_context_recv(ctx, zsock_received_cb, - K_NO_WAIT, ctx->user_data); - if (status < 0) { - errno = -status; - return -1; - } - - while (1) { - if (dest_addr) { - status = net_context_sendto(ctx, buf, len, dest_addr, - addrlen, NULL, timeout, - ctx->user_data); - } else { - status = net_context_send(ctx, buf, len, NULL, timeout, - ctx->user_data); - } - - if (status < 0) { - status = send_check_and_wait(ctx, status, buf_timeout, - timeout, &retry_timeout); - if (status < 0) { - return status; - } - - /* Update the timeout value in case loop is repeated. */ - timeout = sys_timepoint_timeout(end); - - continue; - } - - break; - } - - return status; -} - ssize_t z_impl_zsock_sendto(int sock, const void *buf, size_t len, int flags, const struct sockaddr *dest_addr, socklen_t addrlen) { @@ -929,73 +385,21 @@ ssize_t z_vrfy_zsock_sendto(int sock, const void *buf, size_t len, int flags, #include #endif /* CONFIG_USERSPACE */ -size_t msghdr_non_empty_iov_count(const struct msghdr *msg) +ssize_t z_impl_zsock_sendmsg(int sock, const struct msghdr *msg, int flags) { - size_t non_empty_iov_count = 0; + int bytes_sent; - for (size_t i = 0; i < msg->msg_iovlen; i++) { - if (msg->msg_iov[i].iov_len) { - non_empty_iov_count++; - } - } + SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, sendmsg, sock, msg, flags); - return non_empty_iov_count; -} + bytes_sent = VTABLE_CALL(sendmsg, sock, msg, flags); -ssize_t zsock_sendmsg_ctx(struct net_context *ctx, const struct msghdr *msg, - int flags) -{ - k_timeout_t timeout = K_FOREVER; - uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS; - k_timepoint_t buf_timeout, end; - int status; - - if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) { 
- timeout = K_NO_WAIT; - buf_timeout = sys_timepoint_calc(K_NO_WAIT); - } else { - net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL); - buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS); - } - end = sys_timepoint_calc(timeout); + SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, sendmsg, sock, + bytes_sent < 0 ? -errno : bytes_sent); - while (1) { - status = net_context_sendmsg(ctx, msg, flags, NULL, timeout, NULL); - if (status < 0) { - status = send_check_and_wait(ctx, status, - buf_timeout, - timeout, &retry_timeout); - if (status < 0) { - return status; - } + sock_obj_core_update_send_stats(sock, bytes_sent); - /* Update the timeout value in case loop is repeated. */ - timeout = sys_timepoint_timeout(end); - - continue; - } - - break; - } - - return status; -} - -ssize_t z_impl_zsock_sendmsg(int sock, const struct msghdr *msg, int flags) -{ - int bytes_sent; - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, sendmsg, sock, msg, flags); - - bytes_sent = VTABLE_CALL(sendmsg, sock, msg, flags); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, sendmsg, sock, - bytes_sent < 0 ? 
-errno : bytes_sent); - - sock_obj_core_update_send_stats(sock, bytes_sent); - - return bytes_sent; -} + return bytes_sent; +} #ifdef CONFIG_USERSPACE static inline ssize_t z_vrfy_zsock_sendmsg(int sock, @@ -1086,2468 +490,631 @@ static inline ssize_t z_vrfy_zsock_sendmsg(int sock, #include #endif /* CONFIG_USERSPACE */ -static int sock_get_pkt_src_addr(struct net_pkt *pkt, - enum net_ip_protocol proto, - struct sockaddr *addr, - socklen_t addrlen) +ssize_t z_impl_zsock_recvfrom(int sock, void *buf, size_t max_len, int flags, + struct sockaddr *src_addr, socklen_t *addrlen) { - int ret = 0; - struct net_pkt_cursor backup; - uint16_t *port; - - if (!addr || !pkt) { - return -EINVAL; - } - - net_pkt_cursor_backup(pkt, &backup); - net_pkt_cursor_init(pkt); + int bytes_received; - addr->sa_family = net_pkt_family(pkt); + SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, recvfrom, sock, max_len, flags, src_addr, addrlen); - if (IS_ENABLED(CONFIG_NET_IPV4) && - net_pkt_family(pkt) == AF_INET) { - NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, - struct net_ipv4_hdr); - struct sockaddr_in *addr4 = net_sin(addr); - struct net_ipv4_hdr *ipv4_hdr; + bytes_received = VTABLE_CALL(recvfrom, sock, buf, max_len, flags, src_addr, addrlen); - if (addrlen < sizeof(struct sockaddr_in)) { - ret = -EINVAL; - goto error; - } + SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, recvfrom, sock, + src_addr, addrlen, + bytes_received < 0 ? 
-errno : bytes_received); - ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data( - pkt, &ipv4_access); - if (!ipv4_hdr || - net_pkt_acknowledge_data(pkt, &ipv4_access) || - net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) { - ret = -ENOBUFS; - goto error; - } + sock_obj_core_update_recv_stats(sock, bytes_received); - net_ipv4_addr_copy_raw((uint8_t *)&addr4->sin_addr, ipv4_hdr->src); - port = &addr4->sin_port; - } else if (IS_ENABLED(CONFIG_NET_IPV6) && - net_pkt_family(pkt) == AF_INET6) { - NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access, - struct net_ipv6_hdr); - struct sockaddr_in6 *addr6 = net_sin6(addr); - struct net_ipv6_hdr *ipv6_hdr; - - if (addrlen < sizeof(struct sockaddr_in6)) { - ret = -EINVAL; - goto error; - } + return bytes_received; +} - ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data( - pkt, &ipv6_access); - if (!ipv6_hdr || - net_pkt_acknowledge_data(pkt, &ipv6_access) || - net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) { - ret = -ENOBUFS; - goto error; - } +#ifdef CONFIG_USERSPACE +ssize_t z_vrfy_zsock_recvfrom(int sock, void *buf, size_t max_len, int flags, + struct sockaddr *src_addr, socklen_t *addrlen) +{ + socklen_t addrlen_copy; + ssize_t ret; - net_ipv6_addr_copy_raw((uint8_t *)&addr6->sin6_addr, ipv6_hdr->src); - port = &addr6->sin6_port; - } else { - ret = -ENOTSUP; - goto error; + if (K_SYSCALL_MEMORY_WRITE(buf, max_len)) { + errno = EFAULT; + return -1; } - if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) { - NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr); - struct net_udp_hdr *udp_hdr; - - udp_hdr = (struct net_udp_hdr *)net_pkt_get_data(pkt, - &udp_access); - if (!udp_hdr) { - ret = -ENOBUFS; - goto error; - } - - *port = udp_hdr->src_port; - } else if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) { - NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr); - struct net_tcp_hdr *tcp_hdr; - - tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt, - &tcp_access); - if (!tcp_hdr) { - ret = -ENOBUFS; - goto 
error; - } - - *port = tcp_hdr->src_port; - } else { - ret = -ENOTSUP; + if (addrlen) { + K_OOPS(k_usermode_from_copy(&addrlen_copy, addrlen, + sizeof(socklen_t))); } + K_OOPS(src_addr && K_SYSCALL_MEMORY_WRITE(src_addr, addrlen_copy)); -error: - net_pkt_cursor_restore(pkt, &backup); - - return ret; -} - -#if defined(CONFIG_NET_OFFLOAD) -static bool net_pkt_remote_addr_is_unspecified(struct net_pkt *pkt) -{ - bool ret = true; + ret = z_impl_zsock_recvfrom(sock, (void *)buf, max_len, flags, + (struct sockaddr *)src_addr, + addrlen ? &addrlen_copy : NULL); - if (net_pkt_family(pkt) == AF_INET) { - ret = net_ipv4_is_addr_unspecified(&net_sin(&pkt->remote)->sin_addr); - } else if (net_pkt_family(pkt) == AF_INET6) { - ret = net_ipv6_is_addr_unspecified(&net_sin6(&pkt->remote)->sin6_addr); + if (addrlen) { + K_OOPS(k_usermode_to_copy(addrlen, &addrlen_copy, + sizeof(socklen_t))); } return ret; } +#include +#endif /* CONFIG_USERSPACE */ -static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt, - struct net_context *ctx, - struct sockaddr *addr, - socklen_t addrlen) +ssize_t z_impl_zsock_recvmsg(int sock, struct msghdr *msg, int flags) { - int ret = 0; - - if (!addr || !pkt) { - return -EINVAL; - } + int bytes_received; - if (!net_pkt_remote_addr_is_unspecified(pkt)) { - if (IS_ENABLED(CONFIG_NET_IPV4) && - net_pkt_family(pkt) == AF_INET) { - if (addrlen < sizeof(struct sockaddr_in)) { - ret = -EINVAL; - goto error; - } + SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, recvmsg, sock, msg, flags); - memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in)); - } else if (IS_ENABLED(CONFIG_NET_IPV6) && - net_pkt_family(pkt) == AF_INET6) { - if (addrlen < sizeof(struct sockaddr_in6)) { - ret = -EINVAL; - goto error; - } + bytes_received = VTABLE_CALL(recvmsg, sock, msg, flags); - memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in6)); - } - } else if (ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET) { - memcpy(addr, &ctx->remote, MIN(addrlen, sizeof(ctx->remote))); - } else { - ret = 
-ENOTSUP; - } + SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, recvmsg, sock, msg, + bytes_received < 0 ? -errno : bytes_received); -error: - return ret; -} -#else -static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt, - struct net_context *ctx, - struct sockaddr *addr, - socklen_t addrlen) -{ - ARG_UNUSED(pkt); - ARG_UNUSED(ctx); - ARG_UNUSED(addr); - ARG_UNUSED(addrlen); + sock_obj_core_update_recv_stats(sock, bytes_received); - return 0; + return bytes_received; } -#endif /* CONFIG_NET_OFFLOAD */ -void net_socket_update_tc_rx_time(struct net_pkt *pkt, uint32_t end_tick) +#ifdef CONFIG_USERSPACE +ssize_t z_vrfy_zsock_recvmsg(int sock, struct msghdr *msg, int flags) { - net_pkt_set_rx_stats_tick(pkt, end_tick); + struct msghdr msg_copy; + size_t iovlen; + size_t i; + int ret; - net_stats_update_tc_rx_time(net_pkt_iface(pkt), - net_pkt_priority(pkt), - net_pkt_create_time(pkt), - end_tick); + if (msg == NULL) { + errno = EINVAL; + return -1; + } - SYS_PORT_TRACING_FUNC(net, rx_time, pkt, end_tick); + if (msg->msg_iov == NULL) { + errno = ENOMEM; + return -1; + } - if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)) { - uint32_t val, prev = net_pkt_create_time(pkt); - int i; + K_OOPS(k_usermode_from_copy(&msg_copy, (void *)msg, sizeof(msg_copy))); - for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) { - if (!net_pkt_stats_tick(pkt)[i]) { - break; - } + k_usermode_from_copy(&iovlen, &msg->msg_iovlen, sizeof(iovlen)); - val = net_pkt_stats_tick(pkt)[i] - prev; - prev = net_pkt_stats_tick(pkt)[i]; - net_pkt_stats_tick(pkt)[i] = val; - } + msg_copy.msg_name = NULL; + msg_copy.msg_control = NULL; - net_stats_update_tc_rx_time_detail( - net_pkt_iface(pkt), - net_pkt_priority(pkt), - net_pkt_stats_tick(pkt)); + msg_copy.msg_iov = k_usermode_alloc_from_copy(msg->msg_iov, + msg->msg_iovlen * sizeof(struct iovec)); + if (!msg_copy.msg_iov) { + errno = ENOMEM; + goto fail; } -} -int zsock_wait_data(struct net_context *ctx, k_timeout_t *timeout) -{ - int ret; + /* Clear the 
pointers in the copy so that if the allocation in the + * next loop fails, we do not try to free non allocated memory + * in fail branch. + */ + memset(msg_copy.msg_iov, 0, msg->msg_iovlen * sizeof(struct iovec)); - if (ctx->cond.lock == NULL) { - /* For some reason the lock pointer is not set properly - * when called by fdtable.c:zvfs_finalize_fd() - * It is not practical to try to figure out the fdtable - * lock at this point so skip it. + for (i = 0; i < iovlen; i++) { + /* TODO: In practice we do not need to copy the actual data + * in msghdr when receiving data but currently there is no + * ready made function to do just that (unless we want to call + * relevant malloc function here ourselves). So just use + * the copying variant for now. */ - NET_WARN("No lock pointer set for context %p", ctx); - return -EINVAL; - } - - if (k_fifo_is_empty(&ctx->recv_q)) { - /* Wait for the data to arrive but without holding a lock */ - ret = k_condvar_wait(&ctx->cond.recv, ctx->cond.lock, - *timeout); - if (ret < 0) { - return ret; + msg_copy.msg_iov[i].iov_base = + k_usermode_alloc_from_copy(msg->msg_iov[i].iov_base, + msg->msg_iov[i].iov_len); + if (!msg_copy.msg_iov[i].iov_base) { + errno = ENOMEM; + goto fail; } - if (sock_is_error(ctx)) { - return -POINTER_TO_INT(ctx->user_data); - } + msg_copy.msg_iov[i].iov_len = msg->msg_iov[i].iov_len; } - return 0; -} - -static int insert_pktinfo(struct msghdr *msg, int level, int type, - void *pktinfo, size_t pktinfo_len) -{ - struct cmsghdr *cmsg; + if (msg->msg_namelen > 0) { + if (msg->msg_name == NULL) { + errno = EINVAL; + goto fail; + } - if (msg->msg_controllen < pktinfo_len) { - return -EINVAL; + msg_copy.msg_name = k_usermode_alloc_from_copy(msg->msg_name, + msg->msg_namelen); + if (msg_copy.msg_name == NULL) { + errno = ENOMEM; + goto fail; + } } - for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { - if (cmsg->cmsg_len == 0) { - break; + if (msg->msg_controllen > 0) { + if (msg->msg_control == 
NULL) { + errno = EINVAL; + goto fail; } - } - if (cmsg == NULL) { - return -EINVAL; + msg_copy.msg_control = + k_usermode_alloc_from_copy(msg->msg_control, + msg->msg_controllen); + if (msg_copy.msg_control == NULL) { + errno = ENOMEM; + goto fail; + } } - cmsg->cmsg_len = CMSG_LEN(pktinfo_len); - cmsg->cmsg_level = level; - cmsg->cmsg_type = type; - - memcpy(CMSG_DATA(cmsg), pktinfo, pktinfo_len); - - return 0; -} - -static int add_timestamping(struct net_context *ctx, - struct net_pkt *pkt, - struct msghdr *msg) -{ - uint8_t timestamping = 0; - - net_context_get_option(ctx, NET_OPT_TIMESTAMPING, ×tamping, NULL); + ret = z_impl_zsock_recvmsg(sock, &msg_copy, flags); - if (timestamping) { - return insert_pktinfo(msg, SOL_SOCKET, SO_TIMESTAMPING, - net_pkt_timestamp(pkt), sizeof(struct net_ptp_time)); - } + /* Do not copy anything back if there was an error or nothing was + * received. + */ + if (ret > 0) { + if (msg->msg_namelen > 0 && msg->msg_name != NULL) { + K_OOPS(k_usermode_to_copy(msg->msg_name, + msg_copy.msg_name, + msg_copy.msg_namelen)); + } - return -ENOTSUP; -} + if (msg->msg_controllen > 0 && + msg->msg_control != NULL) { + K_OOPS(k_usermode_to_copy(msg->msg_control, + msg_copy.msg_control, + msg_copy.msg_controllen)); -static int add_pktinfo(struct net_context *ctx, - struct net_pkt *pkt, - struct msghdr *msg) -{ - int ret = -ENOTSUP; - struct net_pkt_cursor backup; - - net_pkt_cursor_backup(pkt, &backup); - net_pkt_cursor_init(pkt); - - if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) { - NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, - struct net_ipv4_hdr); - struct in_pktinfo info; - struct net_ipv4_hdr *ipv4_hdr; - - ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data( - pkt, &ipv4_access); - if (ipv4_hdr == NULL || - net_pkt_acknowledge_data(pkt, &ipv4_access) || - net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) { - ret = -ENOBUFS; - goto out; + msg->msg_controllen = msg_copy.msg_controllen; + } else { + msg->msg_controllen 
= 0U; } - net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_addr, ipv4_hdr->dst); - net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_spec_dst, - (uint8_t *)net_sin_ptr(&ctx->local)->sin_addr); - info.ipi_ifindex = ctx->iface; - - ret = insert_pktinfo(msg, IPPROTO_IP, IP_PKTINFO, - &info, sizeof(info)); + k_usermode_to_copy(&msg->msg_iovlen, + &msg_copy.msg_iovlen, + sizeof(msg->msg_iovlen)); - goto out; - } + /* The new iovlen cannot be bigger than the original one */ + NET_ASSERT(msg_copy.msg_iovlen <= iovlen); - if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) { - NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access, - struct net_ipv6_hdr); - struct in6_pktinfo info; - struct net_ipv6_hdr *ipv6_hdr; - - ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data( - pkt, &ipv6_access); - if (ipv6_hdr == NULL || - net_pkt_acknowledge_data(pkt, &ipv6_access) || - net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) { - ret = -ENOBUFS; - goto out; + for (i = 0; i < iovlen; i++) { + if (i < msg_copy.msg_iovlen) { + K_OOPS(k_usermode_to_copy(msg->msg_iov[i].iov_base, + msg_copy.msg_iov[i].iov_base, + msg_copy.msg_iov[i].iov_len)); + K_OOPS(k_usermode_to_copy(&msg->msg_iov[i].iov_len, + &msg_copy.msg_iov[i].iov_len, + sizeof(msg->msg_iov[i].iov_len))); + } else { + /* Clear out those vectors that we could not populate */ + msg->msg_iov[i].iov_len = 0; + } } - net_ipv6_addr_copy_raw((uint8_t *)&info.ipi6_addr, ipv6_hdr->dst); - info.ipi6_ifindex = ctx->iface; + k_usermode_to_copy(&msg->msg_flags, + &msg_copy.msg_flags, + sizeof(msg->msg_flags)); + } - ret = insert_pktinfo(msg, IPPROTO_IPV6, IPV6_RECVPKTINFO, - &info, sizeof(info)); + k_free(msg_copy.msg_name); + k_free(msg_copy.msg_control); - goto out; + /* Note that we need to free according to original iovlen */ + for (i = 0; i < iovlen; i++) { + k_free(msg_copy.msg_iov[i].iov_base); } -out: - net_pkt_cursor_restore(pkt, &backup); + k_free(msg_copy.msg_iov); return ret; -} - -static int update_msg_controllen(struct msghdr *msg) 
-{ - struct cmsghdr *cmsg; - size_t cmsg_space = 0; - for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { - if (cmsg->cmsg_len == 0) { - break; - } - cmsg_space += cmsg->cmsg_len; +fail: + if (msg_copy.msg_name) { + k_free(msg_copy.msg_name); } - msg->msg_controllen = cmsg_space; - - return 0; -} - -static inline ssize_t zsock_recv_dgram(struct net_context *ctx, - struct msghdr *msg, - void *buf, - size_t max_len, - int flags, - struct sockaddr *src_addr, - socklen_t *addrlen) -{ - k_timeout_t timeout = K_FOREVER; - size_t recv_len = 0; - size_t read_len; - struct net_pkt_cursor backup; - struct net_pkt *pkt; - - if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) { - timeout = K_NO_WAIT; - } else { - int ret; - - net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL); - ret = zsock_wait_data(ctx, &timeout); - if (ret < 0) { - errno = -ret; - return -1; - } + if (msg_copy.msg_control) { + k_free(msg_copy.msg_control); } - if (flags & ZSOCK_MSG_PEEK) { - int res; - - res = fifo_wait_non_empty(&ctx->recv_q, timeout); - /* EAGAIN when timeout expired, EINTR when cancelled */ - if (res && res != -EAGAIN && res != -EINTR) { - errno = -res; - return -1; + if (msg_copy.msg_iov) { + for (i = 0; i < msg_copy.msg_iovlen; i++) { + if (msg_copy.msg_iov[i].iov_base) { + k_free(msg_copy.msg_iov[i].iov_base); + } } - pkt = k_fifo_peek_head(&ctx->recv_q); - } else { - pkt = k_fifo_get(&ctx->recv_q, timeout); + k_free(msg_copy.msg_iov); } - if (!pkt) { - errno = EAGAIN; - return -1; - } - - net_pkt_cursor_backup(pkt, &backup); - - if (src_addr && addrlen) { - if (IS_ENABLED(CONFIG_NET_OFFLOAD) && - net_if_is_ip_offloaded(net_context_get_iface(ctx))) { - int ret; - - ret = sock_get_offload_pkt_src_addr(pkt, ctx, src_addr, - *addrlen); - if (ret < 0) { - errno = -ret; - NET_DBG("sock_get_offload_pkt_src_addr %d", ret); - goto fail; - } - } else { - int ret; - - ret = sock_get_pkt_src_addr(pkt, net_context_get_proto(ctx), - src_addr, *addrlen); 
- if (ret < 0) { - errno = -ret; - NET_DBG("sock_get_pkt_src_addr %d", ret); - goto fail; - } - } - - /* addrlen is a value-result argument, set to actual - * size of source address - */ - if (src_addr->sa_family == AF_INET) { - *addrlen = sizeof(struct sockaddr_in); - } else if (src_addr->sa_family == AF_INET6) { - *addrlen = sizeof(struct sockaddr_in6); - } else { - errno = ENOTSUP; - goto fail; - } - } - - if (msg != NULL) { - int iovec = 0; - size_t tmp_read_len; - - if (msg->msg_iovlen < 1 || msg->msg_iov == NULL) { - errno = ENOMEM; - return -1; - } - - recv_len = net_pkt_remaining_data(pkt); - tmp_read_len = read_len = MIN(recv_len, max_len); - - while (tmp_read_len > 0) { - size_t len; - - buf = msg->msg_iov[iovec].iov_base; - if (buf == NULL) { - errno = EINVAL; - return -1; - } - - len = MIN(tmp_read_len, msg->msg_iov[iovec].iov_len); - - if (net_pkt_read(pkt, buf, len)) { - errno = ENOBUFS; - goto fail; - } - - if (len <= tmp_read_len) { - tmp_read_len -= len; - msg->msg_iov[iovec].iov_len = len; - iovec++; - } else { - errno = EINVAL; - return -1; - } - } - - msg->msg_iovlen = iovec; - - if (recv_len != read_len) { - msg->msg_flags |= ZSOCK_MSG_TRUNC; - } - - } else { - recv_len = net_pkt_remaining_data(pkt); - read_len = MIN(recv_len, max_len); - - if (net_pkt_read(pkt, buf, read_len)) { - errno = ENOBUFS; - goto fail; - } - } - - if (msg != NULL) { - if (msg->msg_control != NULL) { - if (msg->msg_controllen > 0) { - if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING) && - net_context_is_timestamping_set(ctx)) { - if (add_timestamping(ctx, pkt, msg) < 0) { - msg->msg_flags |= ZSOCK_MSG_CTRUNC; - } - } - - if (IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO) && - net_context_is_recv_pktinfo_set(ctx)) { - if (add_pktinfo(ctx, pkt, msg) < 0) { - msg->msg_flags |= ZSOCK_MSG_CTRUNC; - } - } - - /* msg_controllen must be updated to reflect the total length of all - * control messages in the buffer. 
If there are no control data, - * msg_controllen will be cleared as expected It will also take into - * account pre-existing control data - */ - update_msg_controllen(msg); - } - } else { - msg->msg_controllen = 0U; - } - } - - if ((IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) || - IS_ENABLED(CONFIG_TRACING_NET_CORE)) && - !(flags & ZSOCK_MSG_PEEK)) { - net_socket_update_tc_rx_time(pkt, k_cycle_get_32()); - } - - if (!(flags & ZSOCK_MSG_PEEK)) { - net_pkt_unref(pkt); - } else { - net_pkt_cursor_restore(pkt, &backup); - } - - return (flags & ZSOCK_MSG_TRUNC) ? recv_len : read_len; - -fail: - if (!(flags & ZSOCK_MSG_PEEK)) { - net_pkt_unref(pkt); - } - - return -1; -} - -static size_t zsock_recv_stream_immediate(struct net_context *ctx, uint8_t **buf, size_t *max_len, - int flags) -{ - size_t len; - size_t pkt_len; - size_t recv_len = 0; - struct net_pkt *pkt; - struct net_pkt_cursor backup; - struct net_pkt *origin = NULL; - const bool do_recv = !(buf == NULL || max_len == NULL); - size_t _max_len = (max_len == NULL) ? 
SIZE_MAX : *max_len; - const bool peek = (flags & ZSOCK_MSG_PEEK) == ZSOCK_MSG_PEEK; - - while (_max_len > 0) { - /* only peek until we know we can dequeue and / or requeue buffer */ - pkt = k_fifo_peek_head(&ctx->recv_q); - if (pkt == NULL || pkt == origin) { - break; - } - - if (origin == NULL) { - /* mark first pkt to avoid cycles when observing */ - origin = pkt; - } - - pkt_len = net_pkt_remaining_data(pkt); - len = MIN(_max_len, pkt_len); - recv_len += len; - _max_len -= len; - - if (do_recv && len > 0) { - if (peek) { - net_pkt_cursor_backup(pkt, &backup); - } - - net_pkt_read(pkt, *buf, len); - /* update buffer position for caller */ - *buf += len; - - if (peek) { - net_pkt_cursor_restore(pkt, &backup); - } - } - - if (do_recv && !peek) { - if (len == pkt_len) { - /* dequeue empty packets when not observing */ - pkt = k_fifo_get(&ctx->recv_q, K_NO_WAIT); - if (net_pkt_eof(pkt)) { - sock_set_eof(ctx); - } - - if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) || - IS_ENABLED(CONFIG_TRACING_NET_CORE)) { - net_socket_update_tc_rx_time(pkt, k_cycle_get_32()); - } - - net_pkt_unref(pkt); - } - } else if (!do_recv || peek) { - /* requeue packets when observing */ - k_fifo_put(&ctx->recv_q, k_fifo_get(&ctx->recv_q, K_NO_WAIT)); - } - } - - if (do_recv) { - /* convey remaining buffer size back to caller */ - *max_len = _max_len; - } - - return recv_len; -} - -static int zsock_fionread_ctx(struct net_context *ctx) -{ - size_t ret = zsock_recv_stream_immediate(ctx, NULL, NULL, 0); - - return MIN(ret, INT_MAX); -} - -static ssize_t zsock_recv_stream_timed(struct net_context *ctx, struct msghdr *msg, - uint8_t *buf, size_t max_len, - int flags, k_timeout_t timeout) -{ - int res; - k_timepoint_t end; - size_t recv_len = 0, iovec = 0, available_len, max_iovlen = 0; - const bool waitall = (flags & ZSOCK_MSG_WAITALL) == ZSOCK_MSG_WAITALL; - - if (msg != NULL && buf == NULL) { - if (msg->msg_iovlen < 1) { - return -EINVAL; - } - - buf = msg->msg_iov[iovec].iov_base; - 
available_len = msg->msg_iov[iovec].iov_len; - msg->msg_iov[iovec].iov_len = 0; - max_iovlen = msg->msg_iovlen; - } - - for (end = sys_timepoint_calc(timeout); max_len > 0; timeout = sys_timepoint_timeout(end)) { - - if (sock_is_error(ctx)) { - return -POINTER_TO_INT(ctx->user_data); - } - - if (sock_is_eof(ctx)) { - return 0; - } - - if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { - res = zsock_wait_data(ctx, &timeout); - if (res < 0) { - return res; - } - } - - if (msg != NULL) { -again: - res = zsock_recv_stream_immediate(ctx, &buf, &available_len, flags); - recv_len += res; - - if (res == 0 && recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { - return -EAGAIN; - } - - msg->msg_iov[iovec].iov_len += res; - buf = (uint8_t *)(msg->msg_iov[iovec].iov_base) + res; - max_len -= res; - - if (available_len == 0) { - /* All data to this iovec was written */ - iovec++; - - if (iovec == max_iovlen) { - break; - } - - msg->msg_iovlen = iovec; - buf = msg->msg_iov[iovec].iov_base; - available_len = msg->msg_iov[iovec].iov_len; - msg->msg_iov[iovec].iov_len = 0; - - /* If there is more data, read it now and do not wait */ - if (buf != NULL && available_len > 0) { - goto again; - } - - continue; - } - - } else { - res = zsock_recv_stream_immediate(ctx, &buf, &max_len, flags); - recv_len += res; - - if (res == 0) { - if (recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { - return -EAGAIN; - } - } - } - - if (!waitall) { - break; - } - } - - return recv_len; -} - -static ssize_t zsock_recv_stream(struct net_context *ctx, struct msghdr *msg, - void *buf, size_t max_len, int flags) -{ - ssize_t res; - size_t recv_len = 0; - k_timeout_t timeout = K_FOREVER; - - if (!net_context_is_used(ctx)) { - errno = EBADF; - return -1; - } - - if (net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) { - errno = ENOTCONN; - return -1; - } - - if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) { - timeout = K_NO_WAIT; - } else if (!sock_is_eof(ctx) && !sock_is_error(ctx)) { - 
net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL); - } - - if (max_len == 0) { - /* no bytes requested - done! */ - return 0; - } - - res = zsock_recv_stream_timed(ctx, msg, buf, max_len, flags, timeout); - recv_len += MAX(0, res); - - if (res < 0) { - errno = -res; - return -1; - } - - if (!(flags & ZSOCK_MSG_PEEK)) { - net_context_update_recv_wnd(ctx, recv_len); - } - - return recv_len; -} - -ssize_t zsock_recvfrom_ctx(struct net_context *ctx, void *buf, size_t max_len, - int flags, - struct sockaddr *src_addr, socklen_t *addrlen) -{ - enum net_sock_type sock_type = net_context_get_type(ctx); - - if (max_len == 0) { - return 0; - } - - if (sock_type == SOCK_DGRAM) { - return zsock_recv_dgram(ctx, NULL, buf, max_len, flags, src_addr, addrlen); - } else if (sock_type == SOCK_STREAM) { - return zsock_recv_stream(ctx, NULL, buf, max_len, flags); - } - - __ASSERT(0, "Unknown socket type"); - - errno = ENOTSUP; - - return -1; -} - -ssize_t z_impl_zsock_recvfrom(int sock, void *buf, size_t max_len, int flags, - struct sockaddr *src_addr, socklen_t *addrlen) -{ - int bytes_received; - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, recvfrom, sock, max_len, flags, src_addr, addrlen); - - bytes_received = VTABLE_CALL(recvfrom, sock, buf, max_len, flags, src_addr, addrlen); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, recvfrom, sock, - src_addr, addrlen, - bytes_received < 0 ? 
-errno : bytes_received); - - sock_obj_core_update_recv_stats(sock, bytes_received); - - return bytes_received; -} - -#ifdef CONFIG_USERSPACE -ssize_t z_vrfy_zsock_recvfrom(int sock, void *buf, size_t max_len, int flags, - struct sockaddr *src_addr, socklen_t *addrlen) -{ - socklen_t addrlen_copy; - ssize_t ret; - - if (K_SYSCALL_MEMORY_WRITE(buf, max_len)) { - errno = EFAULT; - return -1; - } - - if (addrlen) { - K_OOPS(k_usermode_from_copy(&addrlen_copy, addrlen, - sizeof(socklen_t))); - } - K_OOPS(src_addr && K_SYSCALL_MEMORY_WRITE(src_addr, addrlen_copy)); - - ret = z_impl_zsock_recvfrom(sock, (void *)buf, max_len, flags, - (struct sockaddr *)src_addr, - addrlen ? &addrlen_copy : NULL); - - if (addrlen) { - K_OOPS(k_usermode_to_copy(addrlen, &addrlen_copy, - sizeof(socklen_t))); - } - - return ret; -} -#include -#endif /* CONFIG_USERSPACE */ - -ssize_t zsock_recvmsg_ctx(struct net_context *ctx, struct msghdr *msg, - int flags) -{ - enum net_sock_type sock_type = net_context_get_type(ctx); - size_t i, max_len = 0; - - if (msg == NULL) { - errno = EINVAL; - return -1; - } - - if (msg->msg_iov == NULL) { - errno = ENOMEM; - return -1; - } - - for (i = 0; i < msg->msg_iovlen; i++) { - max_len += msg->msg_iov[i].iov_len; - } - - if (sock_type == SOCK_DGRAM) { - return zsock_recv_dgram(ctx, msg, NULL, max_len, flags, - msg->msg_name, &msg->msg_namelen); - } else if (sock_type == SOCK_STREAM) { - return zsock_recv_stream(ctx, msg, NULL, max_len, flags); - } - - __ASSERT(0, "Unknown socket type"); - - errno = ENOTSUP; - - return -1; -} - -ssize_t z_impl_zsock_recvmsg(int sock, struct msghdr *msg, int flags) -{ - int bytes_received; - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, recvmsg, sock, msg, flags); - - bytes_received = VTABLE_CALL(recvmsg, sock, msg, flags); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, recvmsg, sock, msg, - bytes_received < 0 ? 
-errno : bytes_received); - - sock_obj_core_update_recv_stats(sock, bytes_received); - - return bytes_received; -} - -#ifdef CONFIG_USERSPACE -ssize_t z_vrfy_zsock_recvmsg(int sock, struct msghdr *msg, int flags) -{ - struct msghdr msg_copy; - size_t iovlen; - size_t i; - int ret; - - if (msg == NULL) { - errno = EINVAL; - return -1; - } - - if (msg->msg_iov == NULL) { - errno = ENOMEM; - return -1; - } - - K_OOPS(k_usermode_from_copy(&msg_copy, (void *)msg, sizeof(msg_copy))); - - k_usermode_from_copy(&iovlen, &msg->msg_iovlen, sizeof(iovlen)); - - msg_copy.msg_name = NULL; - msg_copy.msg_control = NULL; - - msg_copy.msg_iov = k_usermode_alloc_from_copy(msg->msg_iov, - msg->msg_iovlen * sizeof(struct iovec)); - if (!msg_copy.msg_iov) { - errno = ENOMEM; - goto fail; - } - - /* Clear the pointers in the copy so that if the allocation in the - * next loop fails, we do not try to free non allocated memory - * in fail branch. - */ - memset(msg_copy.msg_iov, 0, msg->msg_iovlen * sizeof(struct iovec)); - - for (i = 0; i < iovlen; i++) { - /* TODO: In practice we do not need to copy the actual data - * in msghdr when receiving data but currently there is no - * ready made function to do just that (unless we want to call - * relevant malloc function here ourselves). So just use - * the copying variant for now. 
- */ - msg_copy.msg_iov[i].iov_base = - k_usermode_alloc_from_copy(msg->msg_iov[i].iov_base, - msg->msg_iov[i].iov_len); - if (!msg_copy.msg_iov[i].iov_base) { - errno = ENOMEM; - goto fail; - } - - msg_copy.msg_iov[i].iov_len = msg->msg_iov[i].iov_len; - } - - if (msg->msg_namelen > 0) { - if (msg->msg_name == NULL) { - errno = EINVAL; - goto fail; - } - - msg_copy.msg_name = k_usermode_alloc_from_copy(msg->msg_name, - msg->msg_namelen); - if (msg_copy.msg_name == NULL) { - errno = ENOMEM; - goto fail; - } - } - - if (msg->msg_controllen > 0) { - if (msg->msg_control == NULL) { - errno = EINVAL; - goto fail; - } - - msg_copy.msg_control = - k_usermode_alloc_from_copy(msg->msg_control, - msg->msg_controllen); - if (msg_copy.msg_control == NULL) { - errno = ENOMEM; - goto fail; - } - } - - ret = z_impl_zsock_recvmsg(sock, &msg_copy, flags); - - /* Do not copy anything back if there was an error or nothing was - * received. - */ - if (ret > 0) { - if (msg->msg_namelen > 0 && msg->msg_name != NULL) { - K_OOPS(k_usermode_to_copy(msg->msg_name, - msg_copy.msg_name, - msg_copy.msg_namelen)); - } - - if (msg->msg_controllen > 0 && - msg->msg_control != NULL) { - K_OOPS(k_usermode_to_copy(msg->msg_control, - msg_copy.msg_control, - msg_copy.msg_controllen)); - - msg->msg_controllen = msg_copy.msg_controllen; - } else { - msg->msg_controllen = 0U; - } - - k_usermode_to_copy(&msg->msg_iovlen, - &msg_copy.msg_iovlen, - sizeof(msg->msg_iovlen)); - - /* The new iovlen cannot be bigger than the original one */ - NET_ASSERT(msg_copy.msg_iovlen <= iovlen); - - for (i = 0; i < iovlen; i++) { - if (i < msg_copy.msg_iovlen) { - K_OOPS(k_usermode_to_copy(msg->msg_iov[i].iov_base, - msg_copy.msg_iov[i].iov_base, - msg_copy.msg_iov[i].iov_len)); - K_OOPS(k_usermode_to_copy(&msg->msg_iov[i].iov_len, - &msg_copy.msg_iov[i].iov_len, - sizeof(msg->msg_iov[i].iov_len))); - } else { - /* Clear out those vectors that we could not populate */ - msg->msg_iov[i].iov_len = 0; - } - } - - 
k_usermode_to_copy(&msg->msg_flags, - &msg_copy.msg_flags, - sizeof(msg->msg_flags)); - } - - k_free(msg_copy.msg_name); - k_free(msg_copy.msg_control); - - /* Note that we need to free according to original iovlen */ - for (i = 0; i < iovlen; i++) { - k_free(msg_copy.msg_iov[i].iov_base); - } - - k_free(msg_copy.msg_iov); - - return ret; - -fail: - if (msg_copy.msg_name) { - k_free(msg_copy.msg_name); - } - - if (msg_copy.msg_control) { - k_free(msg_copy.msg_control); - } - - if (msg_copy.msg_iov) { - for (i = 0; i < msg_copy.msg_iovlen; i++) { - if (msg_copy.msg_iov[i].iov_base) { - k_free(msg_copy.msg_iov[i].iov_base); - } - } - - k_free(msg_copy.msg_iov); - } - - return -1; -} -#include -#endif /* CONFIG_USERSPACE */ - -/* As this is limited function, we don't follow POSIX signature, with - * "..." instead of last arg. - */ -int z_impl_zsock_fcntl_impl(int sock, int cmd, int flags) -{ - const struct socket_op_vtable *vtable; - struct k_mutex *lock; - void *obj; - int ret; - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, fcntl, sock, cmd, flags); - - obj = get_sock_vtable(sock, &vtable, &lock); - if (obj == NULL) { - errno = EBADF; - SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, fcntl, sock, -errno); - return -1; - } - - (void)k_mutex_lock(lock, K_FOREVER); - - ret = zvfs_fdtable_call_ioctl((const struct fd_op_vtable *)vtable, - obj, cmd, flags); - - k_mutex_unlock(lock); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, fcntl, sock, - ret < 0 ? 
-errno : ret); - return ret; -} - -#ifdef CONFIG_USERSPACE -static inline int z_vrfy_zsock_fcntl_impl(int sock, int cmd, int flags) -{ - return z_impl_zsock_fcntl_impl(sock, cmd, flags); -} -#include -#endif - -int z_impl_zsock_ioctl_impl(int sock, unsigned long request, va_list args) -{ - const struct socket_op_vtable *vtable; - struct k_mutex *lock; - void *ctx; - int ret; - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, ioctl, sock, request); - - ctx = get_sock_vtable(sock, &vtable, &lock); - if (ctx == NULL) { - errno = EBADF; - SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, ioctl, sock, -errno); - return -1; - } - - (void)k_mutex_lock(lock, K_FOREVER); - - NET_DBG("ioctl: ctx=%p, fd=%d, request=%lu", ctx, sock, request); - - ret = vtable->fd_vtable.ioctl(ctx, request, args); - - k_mutex_unlock(lock); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, ioctl, sock, - ret < 0 ? -errno : ret); - return ret; - -} - -#ifdef CONFIG_USERSPACE -static inline int z_vrfy_zsock_ioctl_impl(int sock, unsigned long request, va_list args) -{ - switch (request) { - case ZFD_IOCTL_FIONBIO: - break; - - case ZFD_IOCTL_FIONREAD: { - int *avail; - - avail = va_arg(args, int *); - K_OOPS(K_SYSCALL_MEMORY_WRITE(avail, sizeof(*avail))); - - break; - } - - default: - errno = EOPNOTSUPP; - return -1; - } - - return z_impl_zsock_ioctl_impl(sock, request, args); -} -#include -#endif - -static int zsock_poll_prepare_ctx(struct net_context *ctx, - struct zsock_pollfd *pfd, - struct k_poll_event **pev, - struct k_poll_event *pev_end) -{ - if (pfd->events & ZSOCK_POLLIN) { - if (*pev == pev_end) { - return -ENOMEM; - } - - (*pev)->obj = &ctx->recv_q; - (*pev)->type = K_POLL_TYPE_FIFO_DATA_AVAILABLE; - (*pev)->mode = K_POLL_MODE_NOTIFY_ONLY; - (*pev)->state = K_POLL_STATE_NOT_READY; - (*pev)++; - } - - if (pfd->events & ZSOCK_POLLOUT) { - if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) && - net_context_get_type(ctx) == SOCK_STREAM && - !net_if_is_ip_offloaded(net_context_get_iface(ctx))) { - if (*pev == pev_end) { - return 
-ENOMEM; - } - - if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) { - (*pev)->obj = net_tcp_conn_sem_get(ctx); - } else { - (*pev)->obj = net_tcp_tx_sem_get(ctx); - } - - (*pev)->type = K_POLL_TYPE_SEM_AVAILABLE; - (*pev)->mode = K_POLL_MODE_NOTIFY_ONLY; - (*pev)->state = K_POLL_STATE_NOT_READY; - (*pev)++; - } else { - return -EALREADY; - } - - } - - /* If socket is already in EOF or error, it can be reported - * immediately, so we tell poll() to short-circuit wait. - */ - if (sock_is_eof(ctx) || sock_is_error(ctx)) { - return -EALREADY; - } - - return 0; -} - -static int zsock_poll_update_ctx(struct net_context *ctx, - struct zsock_pollfd *pfd, - struct k_poll_event **pev) -{ - ARG_UNUSED(ctx); - - if (pfd->events & ZSOCK_POLLIN) { - if ((*pev)->state != K_POLL_STATE_NOT_READY || sock_is_eof(ctx)) { - pfd->revents |= ZSOCK_POLLIN; - } - (*pev)++; - } - if (pfd->events & ZSOCK_POLLOUT) { - if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) && - net_context_get_type(ctx) == SOCK_STREAM && - !net_if_is_ip_offloaded(net_context_get_iface(ctx))) { - if ((*pev)->state != K_POLL_STATE_NOT_READY && - !sock_is_eof(ctx) && - (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED)) { - pfd->revents |= ZSOCK_POLLOUT; - } - (*pev)++; - } else { - pfd->revents |= ZSOCK_POLLOUT; - } - } - - if (sock_is_error(ctx)) { - pfd->revents |= ZSOCK_POLLERR; - } - - if (sock_is_eof(ctx)) { - pfd->revents |= ZSOCK_POLLHUP; - } - - return 0; -} - -static inline int time_left(uint32_t start, uint32_t timeout) -{ - uint32_t elapsed = k_uptime_get_32() - start; - - return timeout - elapsed; -} - -int zsock_poll_internal(struct zsock_pollfd *fds, int nfds, k_timeout_t timeout) -{ - bool retry; - int ret = 0; - int i; - struct zsock_pollfd *pfd; - struct k_poll_event poll_events[CONFIG_NET_SOCKETS_POLL_MAX]; - struct k_poll_event *pev; - struct k_poll_event *pev_end = poll_events + ARRAY_SIZE(poll_events); - const struct fd_op_vtable *vtable; - struct k_mutex *lock; - k_timepoint_t end; - bool 
offload = false; - const struct fd_op_vtable *offl_vtable = NULL; - void *offl_ctx = NULL; - - end = sys_timepoint_calc(timeout); - - pev = poll_events; - for (pfd = fds, i = nfds; i--; pfd++) { - void *ctx; - int result; - - /* Per POSIX, negative fd's are just ignored */ - if (pfd->fd < 0) { - continue; - } - - ctx = get_sock_vtable(pfd->fd, - (const struct socket_op_vtable **)&vtable, - &lock); - if (ctx == NULL) { - /* Will set POLLNVAL in return loop */ - continue; - } - - (void)k_mutex_lock(lock, K_FOREVER); - - result = zvfs_fdtable_call_ioctl(vtable, ctx, - ZFD_IOCTL_POLL_PREPARE, - pfd, &pev, pev_end); - if (result == -EALREADY) { - /* If POLL_PREPARE returned with EALREADY, it means - * it already detected that some socket is ready. In - * this case, we still perform a k_poll to pick up - * as many events as possible, but without any wait. - */ - timeout = K_NO_WAIT; - end = sys_timepoint_calc(timeout); - result = 0; - } else if (result == -EXDEV) { - /* If POLL_PREPARE returned EXDEV, it means - * it detected an offloaded socket. - * If offloaded socket is used with native TLS, the TLS - * wrapper for the offloaded poll will be used. - * In case the fds array contains a mixup of offloaded - * and non-offloaded sockets, the offloaded poll handler - * shall return an error. 
- */ - offload = true; - if (offl_vtable == NULL || net_socket_is_tls(ctx)) { - offl_vtable = vtable; - offl_ctx = ctx; - } - - result = 0; - } - - k_mutex_unlock(lock); - - if (result < 0) { - errno = -result; - return -1; - } - } - - if (offload) { - int poll_timeout; - - if (K_TIMEOUT_EQ(timeout, K_FOREVER)) { - poll_timeout = SYS_FOREVER_MS; - } else { - poll_timeout = k_ticks_to_ms_floor32(timeout.ticks); - } - - return zvfs_fdtable_call_ioctl(offl_vtable, offl_ctx, - ZFD_IOCTL_POLL_OFFLOAD, - fds, nfds, poll_timeout); - } - - timeout = sys_timepoint_timeout(end); - - do { - ret = k_poll(poll_events, pev - poll_events, timeout); - /* EAGAIN when timeout expired, EINTR when cancelled (i.e. EOF) */ - if (ret != 0 && ret != -EAGAIN && ret != -EINTR) { - errno = -ret; - return -1; - } - - retry = false; - ret = 0; - - pev = poll_events; - for (pfd = fds, i = nfds; i--; pfd++) { - void *ctx; - int result; - - pfd->revents = 0; - - if (pfd->fd < 0) { - continue; - } - - ctx = get_sock_vtable( - pfd->fd, - (const struct socket_op_vtable **)&vtable, - &lock); - if (ctx == NULL) { - pfd->revents = ZSOCK_POLLNVAL; - ret++; - continue; - } - - (void)k_mutex_lock(lock, K_FOREVER); - - result = zvfs_fdtable_call_ioctl(vtable, ctx, - ZFD_IOCTL_POLL_UPDATE, - pfd, &pev); - k_mutex_unlock(lock); - - if (result == -EAGAIN) { - retry = true; - continue; - } else if (result != 0) { - errno = -result; - return -1; - } - - if (pfd->revents != 0) { - ret++; - } - } - - if (retry) { - if (ret > 0) { - break; - } - - timeout = sys_timepoint_timeout(end); - - if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { - break; - } - } - } while (retry); - - return ret; -} - -int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int poll_timeout) -{ - k_timeout_t timeout; - int ret; - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, poll, fds, nfds, poll_timeout); - - if (poll_timeout < 0) { - timeout = K_FOREVER; - } else { - timeout = K_MSEC(poll_timeout); - } - - ret = zsock_poll_internal(fds, nfds, 
timeout); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, poll, fds, nfds, - ret < 0 ? -errno : ret); - return ret; -} - -#ifdef CONFIG_USERSPACE -static inline int z_vrfy_zsock_poll(struct zsock_pollfd *fds, - int nfds, int timeout) -{ - struct zsock_pollfd *fds_copy; - size_t fds_size; - int ret; - - /* Copy fds array from user mode */ - if (size_mul_overflow(nfds, sizeof(struct zsock_pollfd), &fds_size)) { - errno = EFAULT; - return -1; - } - fds_copy = k_usermode_alloc_from_copy((void *)fds, fds_size); - if (!fds_copy) { - errno = ENOMEM; - return -1; - } - - ret = z_impl_zsock_poll(fds_copy, nfds, timeout); - - if (ret >= 0) { - k_usermode_to_copy((void *)fds, fds_copy, fds_size); - } - k_free(fds_copy); - - return ret; -} -#include -#endif - -int z_impl_zsock_inet_pton(sa_family_t family, const char *src, void *dst) -{ - if (net_addr_pton(family, src, dst) == 0) { - return 1; - } else { - return 0; - } -} - -#ifdef CONFIG_USERSPACE -static inline int z_vrfy_zsock_inet_pton(sa_family_t family, - const char *src, void *dst) -{ - int dst_size; - char src_copy[NET_IPV6_ADDR_LEN]; - char dst_copy[sizeof(struct in6_addr)]; - int ret; - - switch (family) { - case AF_INET: - dst_size = sizeof(struct in_addr); - break; - case AF_INET6: - dst_size = sizeof(struct in6_addr); - break; - default: - errno = EAFNOSUPPORT; - return -1; - } - - K_OOPS(k_usermode_string_copy(src_copy, (char *)src, sizeof(src_copy))); - ret = z_impl_zsock_inet_pton(family, src_copy, dst_copy); - K_OOPS(k_usermode_to_copy(dst, dst_copy, dst_size)); - - return ret; -} -#include -#endif - -static enum tcp_conn_option get_tcp_option(int optname) -{ - switch (optname) { - case TCP_KEEPIDLE: - return TCP_OPT_KEEPIDLE; - case TCP_KEEPINTVL: - return TCP_OPT_KEEPINTVL; - case TCP_KEEPCNT: - return TCP_OPT_KEEPCNT; - } - - return -EINVAL; -} - -int zsock_getsockopt_ctx(struct net_context *ctx, int level, int optname, - void *optval, socklen_t *optlen) -{ - int ret; - - switch (level) { - case SOL_SOCKET: - 
switch (optname) { - case SO_ERROR: { - if (*optlen != sizeof(int)) { - errno = EINVAL; - return -1; - } - - *(int *)optval = POINTER_TO_INT(ctx->user_data); - - return 0; - } - - case SO_TYPE: { - int type = (int)net_context_get_type(ctx); - - if (*optlen != sizeof(type)) { - errno = EINVAL; - return -1; - } - - *(int *)optval = type; - - return 0; - } - - case SO_TXTIME: - if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) { - ret = net_context_get_option(ctx, - NET_OPT_TXTIME, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - break; - - case SO_PROTOCOL: { - int proto = (int)net_context_get_proto(ctx); - - if (*optlen != sizeof(proto)) { - errno = EINVAL; - return -1; - } - - *(int *)optval = proto; - - return 0; - } - - case SO_DOMAIN: { - if (*optlen != sizeof(int)) { - errno = EINVAL; - return -1; - } - - *(int *)optval = net_context_get_family(ctx); - - return 0; - } - - break; - - case SO_RCVBUF: - if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) { - ret = net_context_get_option(ctx, - NET_OPT_RCVBUF, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - break; - - case SO_SNDBUF: - if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) { - ret = net_context_get_option(ctx, - NET_OPT_SNDBUF, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - break; - - case SO_REUSEADDR: - if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) { - ret = net_context_get_option(ctx, - NET_OPT_REUSEADDR, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - break; - - case SO_REUSEPORT: - if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) { - ret = net_context_get_option(ctx, - NET_OPT_REUSEPORT, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - break; - - case SO_KEEPALIVE: - if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) && - net_context_get_proto(ctx) == IPPROTO_TCP) { - ret = net_tcp_get_option(ctx, - TCP_OPT_KEEPALIVE, - optval, 
optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case SO_TIMESTAMPING: - if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) { - ret = net_context_get_option(ctx, - NET_OPT_TIMESTAMPING, - optval, optlen); - - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - } - - break; - - case IPPROTO_TCP: - switch (optname) { - case TCP_NODELAY: - ret = net_tcp_get_option(ctx, TCP_OPT_NODELAY, optval, optlen); - return ret; - - case TCP_KEEPIDLE: - __fallthrough; - case TCP_KEEPINTVL: - __fallthrough; - case TCP_KEEPCNT: - if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) { - ret = net_tcp_get_option(ctx, - get_tcp_option(optname), - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - } - - break; - - case IPPROTO_IP: - switch (optname) { - case IP_TOS: - if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) { - ret = net_context_get_option(ctx, - NET_OPT_DSCP_ECN, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case IP_TTL: - ret = net_context_get_option(ctx, NET_OPT_TTL, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - - case IP_MULTICAST_TTL: - ret = net_context_get_option(ctx, NET_OPT_MCAST_TTL, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case IPPROTO_IPV6: - switch (optname) { - case IPV6_V6ONLY: - if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) { - ret = net_context_get_option(ctx, - NET_OPT_IPV6_V6ONLY, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case IPV6_ADDR_PREFERENCES: - if (IS_ENABLED(CONFIG_NET_IPV6)) { - ret = net_context_get_option(ctx, - NET_OPT_ADDR_PREFERENCES, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case IPV6_TCLASS: - if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) { - ret = 
net_context_get_option(ctx, - NET_OPT_DSCP_ECN, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case IPV6_UNICAST_HOPS: - ret = net_context_get_option(ctx, - NET_OPT_UNICAST_HOP_LIMIT, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - - case IPV6_MULTICAST_HOPS: - ret = net_context_get_option(ctx, - NET_OPT_MCAST_HOP_LIMIT, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - } - - errno = ENOPROTOOPT; return -1; } +#include +#endif /* CONFIG_USERSPACE */ -int z_impl_zsock_getsockopt(int sock, int level, int optname, - void *optval, socklen_t *optlen) +/* As this is limited function, we don't follow POSIX signature, with + * "..." instead of last arg. + */ +int z_impl_zsock_fcntl_impl(int sock, int cmd, int flags) { + const struct socket_op_vtable *vtable; + struct k_mutex *lock; + void *obj; int ret; - SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, getsockopt, sock, level, optname); - - ret = VTABLE_CALL(getsockopt, sock, level, optname, optval, optlen); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, getsockopt, sock, level, optname, - optval, *optlen, ret < 0 ? 
-errno : ret); - return ret; -} - -#ifdef CONFIG_USERSPACE -int z_vrfy_zsock_getsockopt(int sock, int level, int optname, - void *optval, socklen_t *optlen) -{ - socklen_t kernel_optlen = *(socklen_t *)optlen; - void *kernel_optval; - int ret; + SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, fcntl, sock, cmd, flags); - if (K_SYSCALL_MEMORY_WRITE(optval, kernel_optlen)) { - errno = -EPERM; + obj = get_sock_vtable(sock, &vtable, &lock); + if (obj == NULL) { + errno = EBADF; + SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, fcntl, sock, -errno); return -1; } - kernel_optval = k_usermode_alloc_from_copy((const void *)optval, - kernel_optlen); - K_OOPS(!kernel_optval); - - ret = z_impl_zsock_getsockopt(sock, level, optname, - kernel_optval, &kernel_optlen); + (void)k_mutex_lock(lock, K_FOREVER); - K_OOPS(k_usermode_to_copy((void *)optval, kernel_optval, kernel_optlen)); - K_OOPS(k_usermode_to_copy((void *)optlen, &kernel_optlen, - sizeof(socklen_t))); + ret = zvfs_fdtable_call_ioctl((const struct fd_op_vtable *)vtable, + obj, cmd, flags); - k_free(kernel_optval); + k_mutex_unlock(lock); + SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, fcntl, sock, + ret < 0 ? 
-errno : ret); return ret; } -#include -#endif /* CONFIG_USERSPACE */ -static int ipv4_multicast_group(struct net_context *ctx, const void *optval, - socklen_t optlen, bool do_join) +#ifdef CONFIG_USERSPACE +static inline int z_vrfy_zsock_fcntl_impl(int sock, int cmd, int flags) { - struct ip_mreqn *mreqn; - struct net_if *iface; - int ifindex, ret; + return z_impl_zsock_fcntl_impl(sock, cmd, flags); +} +#include +#endif - if (optval == NULL || optlen != sizeof(struct ip_mreqn)) { - errno = EINVAL; - return -1; - } +int z_impl_zsock_ioctl_impl(int sock, unsigned long request, va_list args) +{ + const struct socket_op_vtable *vtable; + struct k_mutex *lock; + void *ctx; + int ret; - mreqn = (struct ip_mreqn *)optval; + SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, ioctl, sock, request); - if (mreqn->imr_multiaddr.s_addr == INADDR_ANY) { - errno = EINVAL; + ctx = get_sock_vtable(sock, &vtable, &lock); + if (ctx == NULL) { + errno = EBADF; + SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, ioctl, sock, -errno); return -1; } - if (mreqn->imr_ifindex != 0) { - iface = net_if_get_by_index(mreqn->imr_ifindex); - } else { - ifindex = net_if_ipv4_addr_lookup_by_index(&mreqn->imr_address); - iface = net_if_get_by_index(ifindex); - } + (void)k_mutex_lock(lock, K_FOREVER); - if (iface == NULL) { - /* Check if ctx has already an interface and if not, - * then select the default interface. - */ - if (ctx->iface <= 0) { - iface = net_if_get_default(); - } else { - iface = net_if_get_by_index(ctx->iface); - } + NET_DBG("ioctl: ctx=%p, fd=%d, request=%lu", ctx, sock, request); - if (iface == NULL) { - errno = EINVAL; - return -1; - } - } + ret = vtable->fd_vtable.ioctl(ctx, request, args); - if (do_join) { - ret = net_ipv4_igmp_join(iface, &mreqn->imr_multiaddr, NULL); - } else { - ret = net_ipv4_igmp_leave(iface, &mreqn->imr_multiaddr); - } + k_mutex_unlock(lock); - if (ret < 0) { - errno = -ret; - return -1; - } + SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, ioctl, sock, + ret < 0 ? 
-errno : ret); + return ret; - return 0; } -static int ipv6_multicast_group(struct net_context *ctx, const void *optval, - socklen_t optlen, bool do_join) +#ifdef CONFIG_USERSPACE +static inline int z_vrfy_zsock_ioctl_impl(int sock, unsigned long request, va_list args) { - struct ipv6_mreq *mreq; - struct net_if *iface; - int ret; - - if (optval == NULL || optlen != sizeof(struct ipv6_mreq)) { - errno = EINVAL; - return -1; - } - - mreq = (struct ipv6_mreq *)optval; - - if (memcmp(&mreq->ipv6mr_multiaddr, - net_ipv6_unspecified_address(), - sizeof(mreq->ipv6mr_multiaddr)) == 0) { - errno = EINVAL; - return -1; - } + switch (request) { + case ZFD_IOCTL_FIONBIO: + break; - iface = net_if_get_by_index(mreq->ipv6mr_ifindex); - if (iface == NULL) { - /* Check if ctx has already an interface and if not, - * then select the default interface. - */ - if (ctx->iface <= 0) { - iface = net_if_get_default(); - } else { - iface = net_if_get_by_index(ctx->iface); - } + case ZFD_IOCTL_FIONREAD: { + int *avail; - if (iface == NULL) { - errno = ENOENT; - return -1; - } - } + avail = va_arg(args, int *); + K_OOPS(K_SYSCALL_MEMORY_WRITE(avail, sizeof(*avail))); - if (do_join) { - ret = net_ipv6_mld_join(iface, &mreq->ipv6mr_multiaddr); - } else { - ret = net_ipv6_mld_leave(iface, &mreq->ipv6mr_multiaddr); + break; } - if (ret < 0) { - errno = -ret; + default: + errno = EOPNOTSUPP; return -1; } - return 0; + return z_impl_zsock_ioctl_impl(sock, request, args); } +#include +#endif -int zsock_setsockopt_ctx(struct net_context *ctx, int level, int optname, - const void *optval, socklen_t optlen) +int zsock_poll_internal(struct zsock_pollfd *fds, int nfds, k_timeout_t timeout) { - int ret; - - switch (level) { - case SOL_SOCKET: - switch (optname) { - case SO_RCVBUF: - if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) { - ret = net_context_set_option(ctx, - NET_OPT_RCVBUF, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case SO_SNDBUF: - if 
(IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) { - ret = net_context_set_option(ctx, - NET_OPT_SNDBUF, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } + bool retry; + int ret = 0; + int i; + struct zsock_pollfd *pfd; + struct k_poll_event poll_events[CONFIG_NET_SOCKETS_POLL_MAX]; + struct k_poll_event *pev; + struct k_poll_event *pev_end = poll_events + ARRAY_SIZE(poll_events); + const struct fd_op_vtable *vtable; + struct k_mutex *lock; + k_timepoint_t end; + bool offload = false; + const struct fd_op_vtable *offl_vtable = NULL; + void *offl_ctx = NULL; - break; + end = sys_timepoint_calc(timeout); - case SO_REUSEADDR: - if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) { - ret = net_context_set_option(ctx, - NET_OPT_REUSEADDR, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } + pev = poll_events; + for (pfd = fds, i = nfds; i--; pfd++) { + void *ctx; + int result; - return 0; - } + /* Per POSIX, negative fd's are just ignored */ + if (pfd->fd < 0) { + continue; + } - break; + ctx = get_sock_vtable(pfd->fd, + (const struct socket_op_vtable **)&vtable, + &lock); + if (ctx == NULL) { + /* Will set POLLNVAL in return loop */ + continue; + } - case SO_REUSEPORT: - if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) { - ret = net_context_set_option(ctx, - NET_OPT_REUSEPORT, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } + (void)k_mutex_lock(lock, K_FOREVER); - return 0; + result = zvfs_fdtable_call_ioctl(vtable, ctx, + ZFD_IOCTL_POLL_PREPARE, + pfd, &pev, pev_end); + if (result == -EALREADY) { + /* If POLL_PREPARE returned with EALREADY, it means + * it already detected that some socket is ready. In + * this case, we still perform a k_poll to pick up + * as many events as possible, but without any wait. + */ + timeout = K_NO_WAIT; + end = sys_timepoint_calc(timeout); + result = 0; + } else if (result == -EXDEV) { + /* If POLL_PREPARE returned EXDEV, it means + * it detected an offloaded socket. 
+ * If offloaded socket is used with native TLS, the TLS + * wrapper for the offloaded poll will be used. + * In case the fds array contains a mixup of offloaded + * and non-offloaded sockets, the offloaded poll handler + * shall return an error. + */ + offload = true; + if (offl_vtable == NULL || net_socket_is_tls(ctx)) { + offl_vtable = vtable; + offl_ctx = ctx; } - break; - - case SO_PRIORITY: - if (IS_ENABLED(CONFIG_NET_CONTEXT_PRIORITY)) { - ret = net_context_set_option(ctx, - NET_OPT_PRIORITY, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } + result = 0; + } - break; + k_mutex_unlock(lock); - case SO_RCVTIMEO: - if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVTIMEO)) { - const struct zsock_timeval *tv = optval; - k_timeout_t timeout; + if (result < 0) { + errno = -result; + return -1; + } + } - if (optlen != sizeof(struct zsock_timeval)) { - errno = EINVAL; - return -1; - } + if (offload) { + int poll_timeout; - if (tv->tv_sec == 0 && tv->tv_usec == 0) { - timeout = K_FOREVER; - } else { - timeout = K_USEC(tv->tv_sec * 1000000ULL - + tv->tv_usec); - } + if (K_TIMEOUT_EQ(timeout, K_FOREVER)) { + poll_timeout = SYS_FOREVER_MS; + } else { + poll_timeout = k_ticks_to_ms_floor32(timeout.ticks); + } - ret = net_context_set_option(ctx, - NET_OPT_RCVTIMEO, - &timeout, - sizeof(timeout)); + return zvfs_fdtable_call_ioctl(offl_vtable, offl_ctx, + ZFD_IOCTL_POLL_OFFLOAD, + fds, nfds, poll_timeout); + } - if (ret < 0) { - errno = -ret; - return -1; - } + timeout = sys_timepoint_timeout(end); - return 0; - } + do { + ret = k_poll(poll_events, pev - poll_events, timeout); + /* EAGAIN when timeout expired, EINTR when cancelled (i.e. 
EOF) */ + if (ret != 0 && ret != -EAGAIN && ret != -EINTR) { + errno = -ret; + return -1; + } - break; - - case SO_SNDTIMEO: - if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDTIMEO)) { - const struct zsock_timeval *tv = optval; - k_timeout_t timeout; - - if (optlen != sizeof(struct zsock_timeval)) { - errno = EINVAL; - return -1; - } - - if (tv->tv_sec == 0 && tv->tv_usec == 0) { - timeout = K_FOREVER; - } else { - timeout = K_USEC(tv->tv_sec * 1000000ULL - + tv->tv_usec); - } - - ret = net_context_set_option(ctx, - NET_OPT_SNDTIMEO, - &timeout, - sizeof(timeout)); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } + retry = false; + ret = 0; - break; + pev = poll_events; + for (pfd = fds, i = nfds; i--; pfd++) { + void *ctx; + int result; - case SO_TXTIME: - if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) { - ret = net_context_set_option(ctx, - NET_OPT_TXTIME, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } + pfd->revents = 0; - return 0; + if (pfd->fd < 0) { + continue; } - break; - - case SO_SOCKS5: - if (IS_ENABLED(CONFIG_SOCKS)) { - ret = net_context_set_option(ctx, - NET_OPT_SOCKS5, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - net_context_set_proxy_enabled(ctx, true); - - return 0; + ctx = get_sock_vtable( + pfd->fd, + (const struct socket_op_vtable **)&vtable, + &lock); + if (ctx == NULL) { + pfd->revents = ZSOCK_POLLNVAL; + ret++; + continue; } - break; - - case SO_BINDTODEVICE: { - struct net_if *iface; - const struct ifreq *ifreq = optval; - - if (net_context_get_family(ctx) != AF_INET && - net_context_get_family(ctx) != AF_INET6) { - errno = EAFNOSUPPORT; - return -1; - } + (void)k_mutex_lock(lock, K_FOREVER); - /* optlen equal to 0 or empty interface name should - * remove the binding. 
- */ - if ((optlen == 0) || (ifreq != NULL && - strlen(ifreq->ifr_name) == 0)) { - ctx->flags &= ~NET_CONTEXT_BOUND_TO_IFACE; - return 0; - } + result = zvfs_fdtable_call_ioctl(vtable, ctx, + ZFD_IOCTL_POLL_UPDATE, + pfd, &pev); + k_mutex_unlock(lock); - if ((ifreq == NULL) || (optlen != sizeof(*ifreq))) { - errno = EINVAL; + if (result == -EAGAIN) { + retry = true; + continue; + } else if (result != 0) { + errno = -result; return -1; } - if (IS_ENABLED(CONFIG_NET_INTERFACE_NAME)) { - ret = net_if_get_by_name(ifreq->ifr_name); - if (ret < 0) { - errno = -ret; - return -1; - } - - iface = net_if_get_by_index(ret); - if (iface == NULL) { - errno = ENODEV; - return -1; - } - } else { - const struct device *dev; - - dev = device_get_binding(ifreq->ifr_name); - if (dev == NULL) { - errno = ENODEV; - return -1; - } - - iface = net_if_lookup_by_dev(dev); - if (iface == NULL) { - errno = ENODEV; - return -1; - } + if (pfd->revents != 0) { + ret++; } - - net_context_bind_iface(ctx, iface); - - return 0; } - case SO_LINGER: - /* ignored. 
for compatibility purposes only */ - return 0; - - case SO_KEEPALIVE: - if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) && - net_context_get_proto(ctx) == IPPROTO_TCP) { - ret = net_tcp_set_option(ctx, - TCP_OPT_KEEPALIVE, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case SO_TIMESTAMPING: - if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) { - ret = net_context_set_option(ctx, - NET_OPT_TIMESTAMPING, - optval, optlen); - - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; + if (retry) { + if (ret > 0) { + break; } - break; - } - - break; + timeout = sys_timepoint_timeout(end); - case IPPROTO_TCP: - switch (optname) { - case TCP_NODELAY: - ret = net_tcp_set_option(ctx, - TCP_OPT_NODELAY, optval, optlen); - return ret; - - case TCP_KEEPIDLE: - __fallthrough; - case TCP_KEEPINTVL: - __fallthrough; - case TCP_KEEPCNT: - if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) { - ret = net_tcp_set_option(ctx, - get_tcp_option(optname), - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; + if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { + break; } - - break; } - break; - - case IPPROTO_IP: - switch (optname) { - case IP_TOS: - if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) { - ret = net_context_set_option(ctx, - NET_OPT_DSCP_ECN, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case IP_PKTINFO: - if (IS_ENABLED(CONFIG_NET_IPV4) && - IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) { - ret = net_context_set_option(ctx, - NET_OPT_RECV_PKTINFO, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } - - break; - - case IP_MULTICAST_TTL: - ret = net_context_set_option(ctx, NET_OPT_MCAST_TTL, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } + } while (retry); - return 0; + return ret; +} - case IP_TTL: - ret = net_context_set_option(ctx, NET_OPT_TTL, - optval, optlen); - if (ret < 0) { - errno = -ret; - 
return -1; - } +int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int poll_timeout) +{ + k_timeout_t timeout; + int ret; - return 0; + SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, poll, fds, nfds, poll_timeout); - case IP_ADD_MEMBERSHIP: - if (IS_ENABLED(CONFIG_NET_IPV4)) { - return ipv4_multicast_group(ctx, optval, - optlen, true); - } + if (poll_timeout < 0) { + timeout = K_FOREVER; + } else { + timeout = K_MSEC(poll_timeout); + } - break; + ret = zsock_poll_internal(fds, nfds, timeout); - case IP_DROP_MEMBERSHIP: - if (IS_ENABLED(CONFIG_NET_IPV4)) { - return ipv4_multicast_group(ctx, optval, - optlen, false); - } + SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, poll, fds, nfds, + ret < 0 ? -errno : ret); + return ret; +} - break; - } +#ifdef CONFIG_USERSPACE +static inline int z_vrfy_zsock_poll(struct zsock_pollfd *fds, + int nfds, int timeout) +{ + struct zsock_pollfd *fds_copy; + size_t fds_size; + int ret; - break; + /* Copy fds array from user mode */ + if (size_mul_overflow(nfds, sizeof(struct zsock_pollfd), &fds_size)) { + errno = EFAULT; + return -1; + } + fds_copy = k_usermode_alloc_from_copy((void *)fds, fds_size); + if (!fds_copy) { + errno = ENOMEM; + return -1; + } - case IPPROTO_IPV6: - switch (optname) { - case IPV6_V6ONLY: - if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) { - ret = net_context_set_option(ctx, - NET_OPT_IPV6_V6ONLY, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - } + ret = z_impl_zsock_poll(fds_copy, nfds, timeout); - return 0; - - case IPV6_RECVPKTINFO: - if (IS_ENABLED(CONFIG_NET_IPV6) && - IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) { - ret = net_context_set_option(ctx, - NET_OPT_RECV_PKTINFO, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } - - return 0; - } + if (ret >= 0) { + k_usermode_to_copy((void *)fds, fds_copy, fds_size); + } + k_free(fds_copy); - break; + return ret; +} +#include +#endif - case IPV6_ADDR_PREFERENCES: - if (IS_ENABLED(CONFIG_NET_IPV6)) { - ret = 
net_context_set_option(ctx, - NET_OPT_ADDR_PREFERENCES, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } +int z_impl_zsock_inet_pton(sa_family_t family, const char *src, void *dst) +{ + if (net_addr_pton(family, src, dst) == 0) { + return 1; + } else { + return 0; + } +} - return 0; - } +#ifdef CONFIG_USERSPACE +static inline int z_vrfy_zsock_inet_pton(sa_family_t family, + const char *src, void *dst) +{ + int dst_size; + char src_copy[NET_IPV6_ADDR_LEN]; + char dst_copy[sizeof(struct in6_addr)]; + int ret; - break; + switch (family) { + case AF_INET: + dst_size = sizeof(struct in_addr); + break; + case AF_INET6: + dst_size = sizeof(struct in6_addr); + break; + default: + errno = EAFNOSUPPORT; + return -1; + } - case IPV6_TCLASS: - if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) { - ret = net_context_set_option(ctx, - NET_OPT_DSCP_ECN, - optval, - optlen); - if (ret < 0) { - errno = -ret; - return -1; - } + K_OOPS(k_usermode_string_copy(src_copy, (char *)src, sizeof(src_copy))); + ret = z_impl_zsock_inet_pton(family, src_copy, dst_copy); + K_OOPS(k_usermode_to_copy(dst, dst_copy, dst_size)); - return 0; - } + return ret; +} +#include +#endif - break; +int z_impl_zsock_getsockopt(int sock, int level, int optname, + void *optval, socklen_t *optlen) +{ + int ret; - case IPV6_UNICAST_HOPS: - ret = net_context_set_option(ctx, - NET_OPT_UNICAST_HOP_LIMIT, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } + SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, getsockopt, sock, level, optname); - return 0; + ret = VTABLE_CALL(getsockopt, sock, level, optname, optval, optlen); - case IPV6_MULTICAST_HOPS: - ret = net_context_set_option(ctx, - NET_OPT_MCAST_HOP_LIMIT, - optval, optlen); - if (ret < 0) { - errno = -ret; - return -1; - } + SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, getsockopt, sock, level, optname, + optval, *optlen, ret < 0 ? 
-errno : ret); + return ret; +} - return 0; +#ifdef CONFIG_USERSPACE +int z_vrfy_zsock_getsockopt(int sock, int level, int optname, + void *optval, socklen_t *optlen) +{ + socklen_t kernel_optlen = *(socklen_t *)optlen; + void *kernel_optval; + int ret; - case IPV6_ADD_MEMBERSHIP: - if (IS_ENABLED(CONFIG_NET_IPV6)) { - return ipv6_multicast_group(ctx, optval, - optlen, true); - } + if (K_SYSCALL_MEMORY_WRITE(optval, kernel_optlen)) { + errno = -EPERM; + return -1; + } - break; + kernel_optval = k_usermode_alloc_from_copy((const void *)optval, + kernel_optlen); + K_OOPS(!kernel_optval); - case IPV6_DROP_MEMBERSHIP: - if (IS_ENABLED(CONFIG_NET_IPV6)) { - return ipv6_multicast_group(ctx, optval, - optlen, false); - } + ret = z_impl_zsock_getsockopt(sock, level, optname, + kernel_optval, &kernel_optlen); - break; - } + K_OOPS(k_usermode_to_copy((void *)optval, kernel_optval, kernel_optlen)); + K_OOPS(k_usermode_to_copy((void *)optlen, &kernel_optlen, + sizeof(socklen_t))); - break; - } + k_free(kernel_optval); - errno = ENOPROTOOPT; - return -1; + return ret; } +#include +#endif /* CONFIG_USERSPACE */ int z_impl_zsock_setsockopt(int sock, int level, int optname, const void *optval, socklen_t optlen) @@ -3584,54 +1151,6 @@ int z_vrfy_zsock_setsockopt(int sock, int level, int optname, #include #endif /* CONFIG_USERSPACE */ -int zsock_getpeername_ctx(struct net_context *ctx, struct sockaddr *addr, - socklen_t *addrlen) -{ - socklen_t newlen = 0; - - if (addr == NULL || addrlen == NULL) { - SET_ERRNO(-EINVAL); - } - - if (!(ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET)) { - SET_ERRNO(-ENOTCONN); - } - - if (net_context_get_type(ctx) == SOCK_STREAM && - net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) { - SET_ERRNO(-ENOTCONN); - } - - if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->remote.sa_family == AF_INET) { - struct sockaddr_in addr4 = { 0 }; - - addr4.sin_family = AF_INET; - addr4.sin_port = net_sin(&ctx->remote)->sin_port; - memcpy(&addr4.sin_addr, 
&net_sin(&ctx->remote)->sin_addr, - sizeof(struct in_addr)); - newlen = sizeof(struct sockaddr_in); - - memcpy(addr, &addr4, MIN(*addrlen, newlen)); - } else if (IS_ENABLED(CONFIG_NET_IPV6) && - ctx->remote.sa_family == AF_INET6) { - struct sockaddr_in6 addr6 = { 0 }; - - addr6.sin6_family = AF_INET6; - addr6.sin6_port = net_sin6(&ctx->remote)->sin6_port; - memcpy(&addr6.sin6_addr, &net_sin6(&ctx->remote)->sin6_addr, - sizeof(struct in6_addr)); - newlen = sizeof(struct sockaddr_in6); - - memcpy(addr, &addr6, MIN(*addrlen, newlen)); - } else { - SET_ERRNO(-EINVAL); - } - - *addrlen = newlen; - - return 0; -} - int z_impl_zsock_getpeername(int sock, struct sockaddr *addr, socklen_t *addrlen) { @@ -3677,56 +1196,6 @@ static inline int z_vrfy_zsock_getpeername(int sock, struct sockaddr *addr, #include #endif /* CONFIG_USERSPACE */ -int zsock_getsockname_ctx(struct net_context *ctx, struct sockaddr *addr, - socklen_t *addrlen) -{ - socklen_t newlen = 0; - int ret; - - if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->local.family == AF_INET) { - struct sockaddr_in addr4 = { 0 }; - - if (net_sin_ptr(&ctx->local)->sin_addr == NULL) { - SET_ERRNO(-EINVAL); - } - - newlen = sizeof(struct sockaddr_in); - - ret = net_context_get_local_addr(ctx, - (struct sockaddr *)&addr4, - &newlen); - if (ret < 0) { - SET_ERRNO(-ret); - } - - memcpy(addr, &addr4, MIN(*addrlen, newlen)); - - } else if (IS_ENABLED(CONFIG_NET_IPV6) && ctx->local.family == AF_INET6) { - struct sockaddr_in6 addr6 = { 0 }; - - if (net_sin6_ptr(&ctx->local)->sin6_addr == NULL) { - SET_ERRNO(-EINVAL); - } - - newlen = sizeof(struct sockaddr_in6); - - ret = net_context_get_local_addr(ctx, - (struct sockaddr *)&addr6, - &newlen); - if (ret < 0) { - SET_ERRNO(-ret); - } - - memcpy(addr, &addr6, MIN(*addrlen, newlen)); - } else { - SET_ERRNO(-EINVAL); - } - - *addrlen = newlen; - - return 0; -} - int z_impl_zsock_getsockname(int sock, struct sockaddr *addr, socklen_t *addrlen) { @@ -3771,210 +1240,3 @@ static inline int 
z_vrfy_zsock_getsockname(int sock, struct sockaddr *addr, } #include #endif /* CONFIG_USERSPACE */ - -static ssize_t sock_read_vmeth(void *obj, void *buffer, size_t count) -{ - return zsock_recvfrom_ctx(obj, buffer, count, 0, NULL, 0); -} - -static ssize_t sock_write_vmeth(void *obj, const void *buffer, size_t count) -{ - return zsock_sendto_ctx(obj, buffer, count, 0, NULL, 0); -} - -static void zsock_ctx_set_lock(struct net_context *ctx, struct k_mutex *lock) -{ - ctx->cond.lock = lock; -} - -static int sock_ioctl_vmeth(void *obj, unsigned int request, va_list args) -{ - switch (request) { - - /* In Zephyr, fcntl() is just an alias of ioctl(). */ - case F_GETFL: - if (sock_is_nonblock(obj)) { - return O_NONBLOCK; - } - - return 0; - - case F_SETFL: { - int flags; - - flags = va_arg(args, int); - - if (flags & O_NONBLOCK) { - sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK); - } else { - sock_set_flag(obj, SOCK_NONBLOCK, 0); - } - - return 0; - } - - case ZFD_IOCTL_POLL_PREPARE: { - struct zsock_pollfd *pfd; - struct k_poll_event **pev; - struct k_poll_event *pev_end; - - pfd = va_arg(args, struct zsock_pollfd *); - pev = va_arg(args, struct k_poll_event **); - pev_end = va_arg(args, struct k_poll_event *); - - return zsock_poll_prepare_ctx(obj, pfd, pev, pev_end); - } - - case ZFD_IOCTL_POLL_UPDATE: { - struct zsock_pollfd *pfd; - struct k_poll_event **pev; - - pfd = va_arg(args, struct zsock_pollfd *); - pev = va_arg(args, struct k_poll_event **); - - return zsock_poll_update_ctx(obj, pfd, pev); - } - - case ZFD_IOCTL_SET_LOCK: { - struct k_mutex *lock; - - lock = va_arg(args, struct k_mutex *); - - zsock_ctx_set_lock(obj, lock); - return 0; - } - - case ZFD_IOCTL_FIONBIO: - sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK); - return 0; - - case ZFD_IOCTL_FIONREAD: { - int *avail = va_arg(args, int *); - - *avail = zsock_fionread_ctx(obj); - return 0; - } - - default: - errno = EOPNOTSUPP; - return -1; - } -} - -static int sock_shutdown_vmeth(void *obj, int how) 
-{ - return zsock_shutdown_ctx(obj, how); -} - -static int sock_bind_vmeth(void *obj, const struct sockaddr *addr, - socklen_t addrlen) -{ - return zsock_bind_ctx(obj, addr, addrlen); -} - -static int sock_connect_vmeth(void *obj, const struct sockaddr *addr, - socklen_t addrlen) -{ - return zsock_connect_ctx(obj, addr, addrlen); -} - -static int sock_listen_vmeth(void *obj, int backlog) -{ - return zsock_listen_ctx(obj, backlog); -} - -static int sock_accept_vmeth(void *obj, struct sockaddr *addr, - socklen_t *addrlen) -{ - return zsock_accept_ctx(obj, addr, addrlen); -} - -static ssize_t sock_sendto_vmeth(void *obj, const void *buf, size_t len, - int flags, const struct sockaddr *dest_addr, - socklen_t addrlen) -{ - return zsock_sendto_ctx(obj, buf, len, flags, dest_addr, addrlen); -} - -static ssize_t sock_sendmsg_vmeth(void *obj, const struct msghdr *msg, - int flags) -{ - return zsock_sendmsg_ctx(obj, msg, flags); -} - -static ssize_t sock_recvmsg_vmeth(void *obj, struct msghdr *msg, int flags) -{ - return zsock_recvmsg_ctx(obj, msg, flags); -} - -static ssize_t sock_recvfrom_vmeth(void *obj, void *buf, size_t max_len, - int flags, struct sockaddr *src_addr, - socklen_t *addrlen) -{ - return zsock_recvfrom_ctx(obj, buf, max_len, flags, - src_addr, addrlen); -} - -static int sock_getsockopt_vmeth(void *obj, int level, int optname, - void *optval, socklen_t *optlen) -{ - return zsock_getsockopt_ctx(obj, level, optname, optval, optlen); -} - -static int sock_setsockopt_vmeth(void *obj, int level, int optname, - const void *optval, socklen_t optlen) -{ - return zsock_setsockopt_ctx(obj, level, optname, optval, optlen); -} - -static int sock_close_vmeth(void *obj) -{ - return zsock_close_ctx(obj); -} -static int sock_getpeername_vmeth(void *obj, struct sockaddr *addr, - socklen_t *addrlen) -{ - return zsock_getpeername_ctx(obj, addr, addrlen); -} - -static int sock_getsockname_vmeth(void *obj, struct sockaddr *addr, - socklen_t *addrlen) -{ - return 
zsock_getsockname_ctx(obj, addr, addrlen); -} - -const struct socket_op_vtable sock_fd_op_vtable = { - .fd_vtable = { - .read = sock_read_vmeth, - .write = sock_write_vmeth, - .close = sock_close_vmeth, - .ioctl = sock_ioctl_vmeth, - }, - .shutdown = sock_shutdown_vmeth, - .bind = sock_bind_vmeth, - .connect = sock_connect_vmeth, - .listen = sock_listen_vmeth, - .accept = sock_accept_vmeth, - .sendto = sock_sendto_vmeth, - .sendmsg = sock_sendmsg_vmeth, - .recvmsg = sock_recvmsg_vmeth, - .recvfrom = sock_recvfrom_vmeth, - .getsockopt = sock_getsockopt_vmeth, - .setsockopt = sock_setsockopt_vmeth, - .getpeername = sock_getpeername_vmeth, - .getsockname = sock_getsockname_vmeth, -}; - -#if defined(CONFIG_NET_NATIVE) -static bool inet_is_supported(int family, int type, int proto) -{ - if (family != AF_INET && family != AF_INET6) { - return false; - } - - return true; -} - -NET_SOCKET_REGISTER(af_inet46, NET_SOCKET_DEFAULT_PRIO, AF_UNSPEC, - inet_is_supported, zsock_socket_internal); -#endif /* CONFIG_NET_NATIVE */ diff --git a/subsys/net/lib/sockets/sockets_inet.c b/subsys/net/lib/sockets/sockets_inet.c new file mode 100644 index 000000000000..88e4e47bfe4a --- /dev/null +++ b/subsys/net/lib/sockets/sockets_inet.c @@ -0,0 +1,2822 @@ +/* + * Copyright (c) 2017 Linaro Limited + * Copyright (c) 2021 Nordic Semiconductor + * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* Zephyr headers */ +#include +LOG_MODULE_DECLARE(net_sock, CONFIG_NET_SOCKETS_LOG_LEVEL); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_SOCKS) +#include "socks.h" +#endif + +#include +#include "../../ip/ipv6.h" + +#include "../../ip/net_stats.h" + +#include "sockets_internal.h" +#include "../../ip/tcp_internal.h" +#include "../../ip/net_private.h" + +const struct socket_op_vtable sock_fd_op_vtable; + +static void zsock_received_cb(struct net_context *ctx, + struct net_pkt *pkt, + union net_ip_header *ip_hdr, + union net_proto_header *proto_hdr, + int status, + void *user_data); + +static int fifo_wait_non_empty(struct k_fifo *fifo, k_timeout_t timeout) +{ + struct k_poll_event events[] = { + K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE, + K_POLL_MODE_NOTIFY_ONLY, fifo), + }; + + return k_poll(events, ARRAY_SIZE(events), timeout); +} + +static void zsock_flush_queue(struct net_context *ctx) +{ + bool is_listen = net_context_get_state(ctx) == NET_CONTEXT_LISTENING; + void *p; + + /* recv_q and accept_q are shared via a union */ + while ((p = k_fifo_get(&ctx->recv_q, K_NO_WAIT)) != NULL) { + if (is_listen) { + NET_DBG("discarding ctx %p", p); + net_context_put(p); + } else { + NET_DBG("discarding pkt %p", p); + net_pkt_unref(p); + } + } + + /* Some threads might be waiting on recv, cancel the wait */ + k_fifo_cancel_wait(&ctx->recv_q); + + /* Wake reader if it was sleeping */ + (void)k_condvar_signal(&ctx->cond.recv); +} + +static int zsock_socket_internal(int family, int type, int proto) +{ + int fd = zvfs_reserve_fd(); + struct net_context *ctx; + int res; + + if (fd < 0) { + return -1; + } + + if (proto == 0) { + if (family == AF_INET || family == AF_INET6) { + if (type == SOCK_DGRAM) { + proto = IPPROTO_UDP; + } else if (type == SOCK_STREAM) { + proto = IPPROTO_TCP; + } + } + } + + res = net_context_get(family, 
type, proto, &ctx); + if (res < 0) { + zvfs_free_fd(fd); + errno = -res; + return -1; + } + + /* Initialize user_data, all other calls will preserve it */ + ctx->user_data = NULL; + + /* The socket flags are stored here */ + ctx->socket_data = NULL; + + /* recv_q and accept_q are in union */ + k_fifo_init(&ctx->recv_q); + + /* Condition variable is used to avoid keeping lock for a long time + * when waiting data to be received + */ + k_condvar_init(&ctx->cond.recv); + + /* TCP context is effectively owned by both application + * and the stack: stack may detect that peer closed/aborted + * connection, but it must not dispose of the context behind + * the application back. Likewise, when application "closes" + * context, it's not disposed of immediately - there's yet + * closing handshake for stack to perform. + */ + if (proto == IPPROTO_TCP) { + net_context_ref(ctx); + } + + zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable, + ZVFS_MODE_IFSOCK); + + NET_DBG("socket: ctx=%p, fd=%d", ctx, fd); + + return fd; +} + +int zsock_close_ctx(struct net_context *ctx) +{ + int ret; + + /* Reset callbacks to avoid any race conditions while + * flushing queues. No need to check return values here, + * as these are fail-free operations and we're closing + * socket anyway. 
+ */ + if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) { + (void)net_context_accept(ctx, NULL, K_NO_WAIT, NULL); + } else { + (void)net_context_recv(ctx, NULL, K_NO_WAIT, NULL); + } + + ctx->user_data = INT_TO_POINTER(EINTR); + sock_set_error(ctx); + + zsock_flush_queue(ctx); + + ret = net_context_put(ctx); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; +} + +static void zsock_accepted_cb(struct net_context *new_ctx, + struct sockaddr *addr, socklen_t addrlen, + int status, void *user_data) +{ + struct net_context *parent = user_data; + + NET_DBG("parent=%p, ctx=%p, st=%d", parent, new_ctx, status); + + if (status == 0) { + /* This just installs a callback, so cannot fail. */ + (void)net_context_recv(new_ctx, zsock_received_cb, K_NO_WAIT, + NULL); + k_fifo_init(&new_ctx->recv_q); + k_condvar_init(&new_ctx->cond.recv); + + k_fifo_put(&parent->accept_q, new_ctx); + + /* TCP context is effectively owned by both application + * and the stack: stack may detect that peer closed/aborted + * connection, but it must not dispose of the context behind + * the application back. Likewise, when application "closes" + * context, it's not disposed of immediately - there's yet + * closing handshake for stack to perform. 
+ */ + net_context_ref(new_ctx); + + (void)k_condvar_signal(&parent->cond.recv); + } + +} + +static void zsock_received_cb(struct net_context *ctx, + struct net_pkt *pkt, + union net_ip_header *ip_hdr, + union net_proto_header *proto_hdr, + int status, + void *user_data) +{ + if (ctx->cond.lock) { + (void)k_mutex_lock(ctx->cond.lock, K_FOREVER); + } + + NET_DBG("ctx=%p, pkt=%p, st=%d, user_data=%p", ctx, pkt, status, + user_data); + + if (status < 0) { + ctx->user_data = INT_TO_POINTER(-status); + sock_set_error(ctx); + } + + /* if pkt is NULL, EOF */ + if (!pkt) { + struct net_pkt *last_pkt = k_fifo_peek_tail(&ctx->recv_q); + + if (!last_pkt) { + /* If there're no packets in the queue, recv() may + * be blocked waiting on it to become non-empty, + * so cancel that wait. + */ + sock_set_eof(ctx); + k_fifo_cancel_wait(&ctx->recv_q); + NET_DBG("Marked socket %p as peer-closed", ctx); + } else { + net_pkt_set_eof(last_pkt, true); + NET_DBG("Set EOF flag on pkt %p", last_pkt); + } + + goto unlock; + } + + /* Normal packet */ + net_pkt_set_eof(pkt, false); + + net_pkt_set_rx_stats_tick(pkt, k_cycle_get_32()); + + k_fifo_put(&ctx->recv_q, pkt); + +unlock: + /* Wake reader if it was sleeping */ + (void)k_condvar_signal(&ctx->cond.recv); + + if (ctx->cond.lock) { + (void)k_mutex_unlock(ctx->cond.lock); + } +} + +int zsock_shutdown_ctx(struct net_context *ctx, int how) +{ + int ret; + + if (how == ZSOCK_SHUT_RD) { + if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) { + ret = net_context_accept(ctx, NULL, K_NO_WAIT, NULL); + if (ret < 0) { + errno = -ret; + return -1; + } + } else { + ret = net_context_recv(ctx, NULL, K_NO_WAIT, NULL); + if (ret < 0) { + errno = -ret; + return -1; + } + } + + sock_set_eof(ctx); + + zsock_flush_queue(ctx); + + return 0; + } + + if (how == ZSOCK_SHUT_WR || how == ZSOCK_SHUT_RDWR) { + errno = ENOTSUP; + return -1; + } + + errno = EINVAL; + return -1; +} + +int zsock_bind_ctx(struct net_context *ctx, const struct sockaddr *addr, + 
socklen_t addrlen) +{ + int ret; + + ret = net_context_bind(ctx, addr, addrlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + /* For DGRAM socket, we expect to receive packets after call to + * bind(), but for STREAM socket, next expected operation is + * listen(), which doesn't work if recv callback is set. + */ + if (net_context_get_type(ctx) == SOCK_DGRAM) { + ret = net_context_recv(ctx, zsock_received_cb, K_NO_WAIT, + ctx->user_data); + if (ret < 0) { + errno = -ret; + return -1; + } + } + + return 0; +} + +static void zsock_connected_cb(struct net_context *ctx, int status, void *user_data) +{ + if (status < 0) { + ctx->user_data = INT_TO_POINTER(-status); + sock_set_error(ctx); + } +} + +int zsock_connect_ctx(struct net_context *ctx, const struct sockaddr *addr, + socklen_t addrlen) +{ + k_timeout_t timeout = K_MSEC(CONFIG_NET_SOCKETS_CONNECT_TIMEOUT); + net_context_connect_cb_t cb = NULL; + int ret; + +#if defined(CONFIG_SOCKS) + if (net_context_is_proxy_enabled(ctx)) { + ret = net_socks5_connect(ctx, addr, addrlen); + if (ret < 0) { + errno = -ret; + return -1; + } + ret = net_context_recv(ctx, zsock_received_cb, + K_NO_WAIT, ctx->user_data); + if (ret < 0) { + errno = -ret; + return -1; + } + return 0; + } +#endif + if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED) { + return 0; + } + + if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) { + if (sock_is_error(ctx)) { + errno = POINTER_TO_INT(ctx->user_data); + return -1; + } + + errno = EALREADY; + return -1; + } + + if (sock_is_nonblock(ctx)) { + timeout = K_NO_WAIT; + cb = zsock_connected_cb; + } + + if (net_context_get_type(ctx) == SOCK_STREAM) { + /* For STREAM sockets net_context_recv() only installs + * recv callback w/o side effects, and it has to be done + * first to avoid race condition, when TCP stream data + * arrives right after connect. 
+ */ + ret = net_context_recv(ctx, zsock_received_cb, + K_NO_WAIT, ctx->user_data); + if (ret < 0) { + errno = -ret; + return -1; + } + ret = net_context_connect(ctx, addr, addrlen, cb, + timeout, ctx->user_data); + if (ret < 0) { + errno = -ret; + return -1; + } + } else { + ret = net_context_connect(ctx, addr, addrlen, cb, + timeout, ctx->user_data); + if (ret < 0) { + errno = -ret; + return -1; + } + ret = net_context_recv(ctx, zsock_received_cb, + K_NO_WAIT, ctx->user_data); + if (ret < 0) { + errno = -ret; + return -1; + } + } + + return 0; +} + +int zsock_listen_ctx(struct net_context *ctx, int backlog) +{ + int ret; + + ret = net_context_listen(ctx, backlog); + if (ret < 0) { + errno = -ret; + return -1; + } + + ret = net_context_accept(ctx, zsock_accepted_cb, K_NO_WAIT, ctx); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; +} + +int zsock_accept_ctx(struct net_context *parent, struct sockaddr *addr, + socklen_t *addrlen) +{ + struct net_context *ctx; + struct net_pkt *last_pkt; + int fd, ret; + + if (!sock_is_nonblock(parent)) { + k_timeout_t timeout = K_FOREVER; + + /* accept() can reuse zsock_wait_data(), as underneath it's + * monitoring the same queue (accept_q is an alias for recv_q). 
+ */ + ret = zsock_wait_data(parent, &timeout); + if (ret < 0) { + errno = -ret; + return -1; + } + } + + ctx = k_fifo_get(&parent->accept_q, K_NO_WAIT); + if (ctx == NULL) { + errno = EAGAIN; + return -1; + } + + fd = zvfs_reserve_fd(); + if (fd < 0) { + zsock_flush_queue(ctx); + net_context_put(ctx); + return -1; + } + + /* Check if the connection is already disconnected */ + last_pkt = k_fifo_peek_tail(&ctx->recv_q); + if (last_pkt) { + if (net_pkt_eof(last_pkt)) { + sock_set_eof(ctx); + zvfs_free_fd(fd); + zsock_flush_queue(ctx); + net_context_put(ctx); + errno = ECONNABORTED; + return -1; + } + } + + if (net_context_is_closing(ctx)) { + errno = ECONNABORTED; + zvfs_free_fd(fd); + zsock_flush_queue(ctx); + net_context_put(ctx); + return -1; + } + + net_context_set_accepting(ctx, false); + + + if (addr != NULL && addrlen != NULL) { + int len = MIN(*addrlen, sizeof(ctx->remote)); + + memcpy(addr, &ctx->remote, len); + /* addrlen is a value-result argument, set to actual + * size of source address + */ + if (ctx->remote.sa_family == AF_INET) { + *addrlen = sizeof(struct sockaddr_in); + } else if (ctx->remote.sa_family == AF_INET6) { + *addrlen = sizeof(struct sockaddr_in6); + } else { + zvfs_free_fd(fd); + errno = ENOTSUP; + zsock_flush_queue(ctx); + net_context_put(ctx); + return -1; + } + } + + NET_DBG("accept: ctx=%p, fd=%d", ctx, fd); + + zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable, + ZVFS_MODE_IFSOCK); + + return fd; +} + +#define WAIT_BUFS_INITIAL_MS 10 +#define WAIT_BUFS_MAX_MS 100 +#define MAX_WAIT_BUFS K_MSEC(CONFIG_NET_SOCKET_MAX_SEND_WAIT) + +static int send_check_and_wait(struct net_context *ctx, int status, + k_timepoint_t buf_timeout, k_timeout_t timeout, + uint32_t *retry_timeout) +{ + if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { + goto out; + } + + if (status != -ENOBUFS && status != -EAGAIN) { + goto out; + } + + /* If we cannot get any buffers in reasonable + * amount of time, then do not wait forever as + * there 
might be some bigger issue. + * If we get -EAGAIN and cannot recover, then + * it means that the sending window is blocked + * and we just cannot send anything. + */ + if (sys_timepoint_expired(buf_timeout)) { + if (status == -ENOBUFS) { + status = -ENOMEM; + } else { + status = -ENOBUFS; + } + + goto out; + } + + if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) { + *retry_timeout = + MIN(*retry_timeout, k_ticks_to_ms_floor32(timeout.ticks)); + } + + if (ctx->cond.lock) { + (void)k_mutex_unlock(ctx->cond.lock); + } + + if (status == -ENOBUFS) { + /* We can monitor net_pkt/net_buf availability, so just wait. */ + k_sleep(K_MSEC(*retry_timeout)); + } + + if (status == -EAGAIN) { + if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) && + net_context_get_type(ctx) == SOCK_STREAM && + !net_if_is_ip_offloaded(net_context_get_iface(ctx))) { + struct k_poll_event event; + + k_poll_event_init(&event, + K_POLL_TYPE_SEM_AVAILABLE, + K_POLL_MODE_NOTIFY_ONLY, + net_tcp_tx_sem_get(ctx)); + + k_poll(&event, 1, K_MSEC(*retry_timeout)); + } else { + k_sleep(K_MSEC(*retry_timeout)); + } + } + /* Exponentially increase the retry timeout + * Cap the value to WAIT_BUFS_MAX_MS + */ + *retry_timeout = MIN(WAIT_BUFS_MAX_MS, *retry_timeout << 1); + + if (ctx->cond.lock) { + (void)k_mutex_lock(ctx->cond.lock, K_FOREVER); + } + + return 0; + +out: + errno = -status; + return -1; +} + +ssize_t zsock_sendto_ctx(struct net_context *ctx, const void *buf, size_t len, + int flags, + const struct sockaddr *dest_addr, socklen_t addrlen) +{ + k_timeout_t timeout = K_FOREVER; + uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS; + k_timepoint_t buf_timeout, end; + int status; + + if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) { + timeout = K_NO_WAIT; + buf_timeout = sys_timepoint_calc(K_NO_WAIT); + } else { + net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL); + buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS); + } + end = sys_timepoint_calc(timeout); + + /* Register the callback before sending in order to 
receive the response + * from the peer. + */ + status = net_context_recv(ctx, zsock_received_cb, + K_NO_WAIT, ctx->user_data); + if (status < 0) { + errno = -status; + return -1; + } + + while (1) { + if (dest_addr) { + status = net_context_sendto(ctx, buf, len, dest_addr, + addrlen, NULL, timeout, + ctx->user_data); + } else { + status = net_context_send(ctx, buf, len, NULL, timeout, + ctx->user_data); + } + + if (status < 0) { + status = send_check_and_wait(ctx, status, buf_timeout, + timeout, &retry_timeout); + if (status < 0) { + return status; + } + + /* Update the timeout value in case loop is repeated. */ + timeout = sys_timepoint_timeout(end); + + continue; + } + + break; + } + + return status; +} + +ssize_t zsock_sendmsg_ctx(struct net_context *ctx, const struct msghdr *msg, + int flags) +{ + k_timeout_t timeout = K_FOREVER; + uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS; + k_timepoint_t buf_timeout, end; + int status; + + if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) { + timeout = K_NO_WAIT; + buf_timeout = sys_timepoint_calc(K_NO_WAIT); + } else { + net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL); + buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS); + } + end = sys_timepoint_calc(timeout); + + while (1) { + status = net_context_sendmsg(ctx, msg, flags, NULL, timeout, NULL); + if (status < 0) { + status = send_check_and_wait(ctx, status, + buf_timeout, + timeout, &retry_timeout); + if (status < 0) { + return status; + } + + /* Update the timeout value in case loop is repeated. 
*/ + timeout = sys_timepoint_timeout(end); + + continue; + } + + break; + } + + return status; +} + +static int sock_get_pkt_src_addr(struct net_pkt *pkt, + enum net_ip_protocol proto, + struct sockaddr *addr, + socklen_t addrlen) +{ + int ret = 0; + struct net_pkt_cursor backup; + uint16_t *port; + + if (!addr || !pkt) { + return -EINVAL; + } + + net_pkt_cursor_backup(pkt, &backup); + net_pkt_cursor_init(pkt); + + addr->sa_family = net_pkt_family(pkt); + + if (IS_ENABLED(CONFIG_NET_IPV4) && + net_pkt_family(pkt) == AF_INET) { + NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, + struct net_ipv4_hdr); + struct sockaddr_in *addr4 = net_sin(addr); + struct net_ipv4_hdr *ipv4_hdr; + + if (addrlen < sizeof(struct sockaddr_in)) { + ret = -EINVAL; + goto error; + } + + ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data( + pkt, &ipv4_access); + if (!ipv4_hdr || + net_pkt_acknowledge_data(pkt, &ipv4_access) || + net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) { + ret = -ENOBUFS; + goto error; + } + + net_ipv4_addr_copy_raw((uint8_t *)&addr4->sin_addr, ipv4_hdr->src); + port = &addr4->sin_port; + } else if (IS_ENABLED(CONFIG_NET_IPV6) && + net_pkt_family(pkt) == AF_INET6) { + NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access, + struct net_ipv6_hdr); + struct sockaddr_in6 *addr6 = net_sin6(addr); + struct net_ipv6_hdr *ipv6_hdr; + + if (addrlen < sizeof(struct sockaddr_in6)) { + ret = -EINVAL; + goto error; + } + + ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data( + pkt, &ipv6_access); + if (!ipv6_hdr || + net_pkt_acknowledge_data(pkt, &ipv6_access) || + net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) { + ret = -ENOBUFS; + goto error; + } + + net_ipv6_addr_copy_raw((uint8_t *)&addr6->sin6_addr, ipv6_hdr->src); + port = &addr6->sin6_port; + } else { + ret = -ENOTSUP; + goto error; + } + + if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) { + NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr); + struct net_udp_hdr *udp_hdr; + + udp_hdr = (struct net_udp_hdr 
*)net_pkt_get_data(pkt, + &udp_access); + if (!udp_hdr) { + ret = -ENOBUFS; + goto error; + } + + *port = udp_hdr->src_port; + } else if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) { + NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr); + struct net_tcp_hdr *tcp_hdr; + + tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt, + &tcp_access); + if (!tcp_hdr) { + ret = -ENOBUFS; + goto error; + } + + *port = tcp_hdr->src_port; + } else { + ret = -ENOTSUP; + } + +error: + net_pkt_cursor_restore(pkt, &backup); + + return ret; +} + +#if defined(CONFIG_NET_OFFLOAD) +static bool net_pkt_remote_addr_is_unspecified(struct net_pkt *pkt) +{ + bool ret = true; + + if (net_pkt_family(pkt) == AF_INET) { + ret = net_ipv4_is_addr_unspecified(&net_sin(&pkt->remote)->sin_addr); + } else if (net_pkt_family(pkt) == AF_INET6) { + ret = net_ipv6_is_addr_unspecified(&net_sin6(&pkt->remote)->sin6_addr); + } + + return ret; +} + +static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt, + struct net_context *ctx, + struct sockaddr *addr, + socklen_t addrlen) +{ + int ret = 0; + + if (!addr || !pkt) { + return -EINVAL; + } + + if (!net_pkt_remote_addr_is_unspecified(pkt)) { + if (IS_ENABLED(CONFIG_NET_IPV4) && + net_pkt_family(pkt) == AF_INET) { + if (addrlen < sizeof(struct sockaddr_in)) { + ret = -EINVAL; + goto error; + } + + memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in)); + } else if (IS_ENABLED(CONFIG_NET_IPV6) && + net_pkt_family(pkt) == AF_INET6) { + if (addrlen < sizeof(struct sockaddr_in6)) { + ret = -EINVAL; + goto error; + } + + memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in6)); + } + } else if (ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET) { + memcpy(addr, &ctx->remote, MIN(addrlen, sizeof(ctx->remote))); + } else { + ret = -ENOTSUP; + } + +error: + return ret; +} +#else +static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt, + struct net_context *ctx, + struct sockaddr *addr, + socklen_t addrlen) +{ + ARG_UNUSED(pkt); + ARG_UNUSED(ctx); + 
ARG_UNUSED(addr); + ARG_UNUSED(addrlen); + + return 0; +} +#endif /* CONFIG_NET_OFFLOAD */ + +void net_socket_update_tc_rx_time(struct net_pkt *pkt, uint32_t end_tick) +{ + net_pkt_set_rx_stats_tick(pkt, end_tick); + + net_stats_update_tc_rx_time(net_pkt_iface(pkt), + net_pkt_priority(pkt), + net_pkt_create_time(pkt), + end_tick); + + SYS_PORT_TRACING_FUNC(net, rx_time, pkt, end_tick); + + if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)) { + uint32_t val, prev = net_pkt_create_time(pkt); + int i; + + for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) { + if (!net_pkt_stats_tick(pkt)[i]) { + break; + } + + val = net_pkt_stats_tick(pkt)[i] - prev; + prev = net_pkt_stats_tick(pkt)[i]; + net_pkt_stats_tick(pkt)[i] = val; + } + + net_stats_update_tc_rx_time_detail( + net_pkt_iface(pkt), + net_pkt_priority(pkt), + net_pkt_stats_tick(pkt)); + } +} + +int zsock_wait_data(struct net_context *ctx, k_timeout_t *timeout) +{ + int ret; + + if (ctx->cond.lock == NULL) { + /* For some reason the lock pointer is not set properly + * when called by fdtable.c:zvfs_finalize_fd() + * It is not practical to try to figure out the fdtable + * lock at this point so skip it. 
+ */ + NET_WARN("No lock pointer set for context %p", ctx); + return -EINVAL; + } + + if (k_fifo_is_empty(&ctx->recv_q)) { + /* Wait for the data to arrive but without holding a lock */ + ret = k_condvar_wait(&ctx->cond.recv, ctx->cond.lock, + *timeout); + if (ret < 0) { + return ret; + } + + if (sock_is_error(ctx)) { + return -POINTER_TO_INT(ctx->user_data); + } + } + + return 0; +} + +static int insert_pktinfo(struct msghdr *msg, int level, int type, + void *pktinfo, size_t pktinfo_len) +{ + struct cmsghdr *cmsg; + + if (msg->msg_controllen < pktinfo_len) { + return -EINVAL; + } + + for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { + if (cmsg->cmsg_len == 0) { + break; + } + } + + if (cmsg == NULL) { + return -EINVAL; + } + + cmsg->cmsg_len = CMSG_LEN(pktinfo_len); + cmsg->cmsg_level = level; + cmsg->cmsg_type = type; + + memcpy(CMSG_DATA(cmsg), pktinfo, pktinfo_len); + + return 0; +} + +static int add_timestamping(struct net_context *ctx, + struct net_pkt *pkt, + struct msghdr *msg) +{ + uint8_t timestamping = 0; + + net_context_get_option(ctx, NET_OPT_TIMESTAMPING, ×tamping, NULL); + + if (timestamping) { + return insert_pktinfo(msg, SOL_SOCKET, SO_TIMESTAMPING, + net_pkt_timestamp(pkt), sizeof(struct net_ptp_time)); + } + + return -ENOTSUP; +} + +static int add_pktinfo(struct net_context *ctx, + struct net_pkt *pkt, + struct msghdr *msg) +{ + int ret = -ENOTSUP; + struct net_pkt_cursor backup; + + net_pkt_cursor_backup(pkt, &backup); + net_pkt_cursor_init(pkt); + + if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) { + NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, + struct net_ipv4_hdr); + struct in_pktinfo info; + struct net_ipv4_hdr *ipv4_hdr; + + ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data( + pkt, &ipv4_access); + if (ipv4_hdr == NULL || + net_pkt_acknowledge_data(pkt, &ipv4_access) || + net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) { + ret = -ENOBUFS; + goto out; + } + + 
net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_addr, ipv4_hdr->dst); + net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_spec_dst, + (uint8_t *)net_sin_ptr(&ctx->local)->sin_addr); + info.ipi_ifindex = ctx->iface; + + ret = insert_pktinfo(msg, IPPROTO_IP, IP_PKTINFO, + &info, sizeof(info)); + + goto out; + } + + if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) { + NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access, + struct net_ipv6_hdr); + struct in6_pktinfo info; + struct net_ipv6_hdr *ipv6_hdr; + + ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data( + pkt, &ipv6_access); + if (ipv6_hdr == NULL || + net_pkt_acknowledge_data(pkt, &ipv6_access) || + net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) { + ret = -ENOBUFS; + goto out; + } + + net_ipv6_addr_copy_raw((uint8_t *)&info.ipi6_addr, ipv6_hdr->dst); + info.ipi6_ifindex = ctx->iface; + + ret = insert_pktinfo(msg, IPPROTO_IPV6, IPV6_RECVPKTINFO, + &info, sizeof(info)); + + goto out; + } + +out: + net_pkt_cursor_restore(pkt, &backup); + + return ret; +} + +static int update_msg_controllen(struct msghdr *msg) +{ + struct cmsghdr *cmsg; + size_t cmsg_space = 0; + + for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { + if (cmsg->cmsg_len == 0) { + break; + } + cmsg_space += cmsg->cmsg_len; + } + msg->msg_controllen = cmsg_space; + + return 0; +} + +static inline ssize_t zsock_recv_dgram(struct net_context *ctx, + struct msghdr *msg, + void *buf, + size_t max_len, + int flags, + struct sockaddr *src_addr, + socklen_t *addrlen) +{ + k_timeout_t timeout = K_FOREVER; + size_t recv_len = 0; + size_t read_len; + struct net_pkt_cursor backup; + struct net_pkt *pkt; + + if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) { + timeout = K_NO_WAIT; + } else { + int ret; + + net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL); + + ret = zsock_wait_data(ctx, &timeout); + if (ret < 0) { + errno = -ret; + return -1; + } + } + + if (flags & ZSOCK_MSG_PEEK) { + int res; + + res = 
fifo_wait_non_empty(&ctx->recv_q, timeout); + /* EAGAIN when timeout expired, EINTR when cancelled */ + if (res && res != -EAGAIN && res != -EINTR) { + errno = -res; + return -1; + } + + pkt = k_fifo_peek_head(&ctx->recv_q); + } else { + pkt = k_fifo_get(&ctx->recv_q, timeout); + } + + if (!pkt) { + errno = EAGAIN; + return -1; + } + + net_pkt_cursor_backup(pkt, &backup); + + if (src_addr && addrlen) { + if (IS_ENABLED(CONFIG_NET_OFFLOAD) && + net_if_is_ip_offloaded(net_context_get_iface(ctx))) { + int ret; + + ret = sock_get_offload_pkt_src_addr(pkt, ctx, src_addr, + *addrlen); + if (ret < 0) { + errno = -ret; + NET_DBG("sock_get_offload_pkt_src_addr %d", ret); + goto fail; + } + } else { + int ret; + + ret = sock_get_pkt_src_addr(pkt, net_context_get_proto(ctx), + src_addr, *addrlen); + if (ret < 0) { + errno = -ret; + NET_DBG("sock_get_pkt_src_addr %d", ret); + goto fail; + } + } + + /* addrlen is a value-result argument, set to actual + * size of source address + */ + if (src_addr->sa_family == AF_INET) { + *addrlen = sizeof(struct sockaddr_in); + } else if (src_addr->sa_family == AF_INET6) { + *addrlen = sizeof(struct sockaddr_in6); + } else { + errno = ENOTSUP; + goto fail; + } + } + + if (msg != NULL) { + int iovec = 0; + size_t tmp_read_len; + + if (msg->msg_iovlen < 1 || msg->msg_iov == NULL) { + errno = ENOMEM; + return -1; + } + + recv_len = net_pkt_remaining_data(pkt); + tmp_read_len = read_len = MIN(recv_len, max_len); + + while (tmp_read_len > 0) { + size_t len; + + buf = msg->msg_iov[iovec].iov_base; + if (buf == NULL) { + errno = EINVAL; + return -1; + } + + len = MIN(tmp_read_len, msg->msg_iov[iovec].iov_len); + + if (net_pkt_read(pkt, buf, len)) { + errno = ENOBUFS; + goto fail; + } + + if (len <= tmp_read_len) { + tmp_read_len -= len; + msg->msg_iov[iovec].iov_len = len; + iovec++; + } else { + errno = EINVAL; + return -1; + } + } + + msg->msg_iovlen = iovec; + + if (recv_len != read_len) { + msg->msg_flags |= ZSOCK_MSG_TRUNC; + } + + } else { + 
recv_len = net_pkt_remaining_data(pkt); + read_len = MIN(recv_len, max_len); + + if (net_pkt_read(pkt, buf, read_len)) { + errno = ENOBUFS; + goto fail; + } + } + + if (msg != NULL) { + if (msg->msg_control != NULL) { + if (msg->msg_controllen > 0) { + if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING) && + net_context_is_timestamping_set(ctx)) { + if (add_timestamping(ctx, pkt, msg) < 0) { + msg->msg_flags |= ZSOCK_MSG_CTRUNC; + } + } + + if (IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO) && + net_context_is_recv_pktinfo_set(ctx)) { + if (add_pktinfo(ctx, pkt, msg) < 0) { + msg->msg_flags |= ZSOCK_MSG_CTRUNC; + } + } + + /* msg_controllen must be updated to reflect the total length of all + * control messages in the buffer. If there are no control data, + * msg_controllen will be cleared as expected It will also take into + * account pre-existing control data + */ + update_msg_controllen(msg); + } + } else { + msg->msg_controllen = 0U; + } + } + + if ((IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) || + IS_ENABLED(CONFIG_TRACING_NET_CORE)) && + !(flags & ZSOCK_MSG_PEEK)) { + net_socket_update_tc_rx_time(pkt, k_cycle_get_32()); + } + + if (!(flags & ZSOCK_MSG_PEEK)) { + net_pkt_unref(pkt); + } else { + net_pkt_cursor_restore(pkt, &backup); + } + + return (flags & ZSOCK_MSG_TRUNC) ? recv_len : read_len; + +fail: + if (!(flags & ZSOCK_MSG_PEEK)) { + net_pkt_unref(pkt); + } + + return -1; +} + +static size_t zsock_recv_stream_immediate(struct net_context *ctx, uint8_t **buf, size_t *max_len, + int flags) +{ + size_t len; + size_t pkt_len; + size_t recv_len = 0; + struct net_pkt *pkt; + struct net_pkt_cursor backup; + struct net_pkt *origin = NULL; + const bool do_recv = !(buf == NULL || max_len == NULL); + size_t _max_len = (max_len == NULL) ? 
SIZE_MAX : *max_len; + const bool peek = (flags & ZSOCK_MSG_PEEK) == ZSOCK_MSG_PEEK; + + while (_max_len > 0) { + /* only peek until we know we can dequeue and / or requeue buffer */ + pkt = k_fifo_peek_head(&ctx->recv_q); + if (pkt == NULL || pkt == origin) { + break; + } + + if (origin == NULL) { + /* mark first pkt to avoid cycles when observing */ + origin = pkt; + } + + pkt_len = net_pkt_remaining_data(pkt); + len = MIN(_max_len, pkt_len); + recv_len += len; + _max_len -= len; + + if (do_recv && len > 0) { + if (peek) { + net_pkt_cursor_backup(pkt, &backup); + } + + net_pkt_read(pkt, *buf, len); + /* update buffer position for caller */ + *buf += len; + + if (peek) { + net_pkt_cursor_restore(pkt, &backup); + } + } + + if (do_recv && !peek) { + if (len == pkt_len) { + /* dequeue empty packets when not observing */ + pkt = k_fifo_get(&ctx->recv_q, K_NO_WAIT); + if (net_pkt_eof(pkt)) { + sock_set_eof(ctx); + } + + if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) || + IS_ENABLED(CONFIG_TRACING_NET_CORE)) { + net_socket_update_tc_rx_time(pkt, k_cycle_get_32()); + } + + net_pkt_unref(pkt); + } + } else if (!do_recv || peek) { + /* requeue packets when observing */ + k_fifo_put(&ctx->recv_q, k_fifo_get(&ctx->recv_q, K_NO_WAIT)); + } + } + + if (do_recv) { + /* convey remaining buffer size back to caller */ + *max_len = _max_len; + } + + return recv_len; +} + +static int zsock_fionread_ctx(struct net_context *ctx) +{ + size_t ret = zsock_recv_stream_immediate(ctx, NULL, NULL, 0); + + return MIN(ret, INT_MAX); +} + +static ssize_t zsock_recv_stream_timed(struct net_context *ctx, struct msghdr *msg, + uint8_t *buf, size_t max_len, + int flags, k_timeout_t timeout) +{ + int res; + k_timepoint_t end; + size_t recv_len = 0, iovec = 0, available_len, max_iovlen = 0; + const bool waitall = (flags & ZSOCK_MSG_WAITALL) == ZSOCK_MSG_WAITALL; + + if (msg != NULL && buf == NULL) { + if (msg->msg_iovlen < 1) { + return -EINVAL; + } + + buf = msg->msg_iov[iovec].iov_base; + 
available_len = msg->msg_iov[iovec].iov_len; + msg->msg_iov[iovec].iov_len = 0; + max_iovlen = msg->msg_iovlen; + } + + for (end = sys_timepoint_calc(timeout); max_len > 0; timeout = sys_timepoint_timeout(end)) { + + if (sock_is_error(ctx)) { + return -POINTER_TO_INT(ctx->user_data); + } + + if (sock_is_eof(ctx)) { + return 0; + } + + if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { + res = zsock_wait_data(ctx, &timeout); + if (res < 0) { + return res; + } + } + + if (msg != NULL) { +again: + res = zsock_recv_stream_immediate(ctx, &buf, &available_len, flags); + recv_len += res; + + if (res == 0 && recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { + return -EAGAIN; + } + + msg->msg_iov[iovec].iov_len += res; + buf = (uint8_t *)(msg->msg_iov[iovec].iov_base) + res; + max_len -= res; + + if (available_len == 0) { + /* All data to this iovec was written */ + iovec++; + + if (iovec == max_iovlen) { + break; + } + + msg->msg_iovlen = iovec; + buf = msg->msg_iov[iovec].iov_base; + available_len = msg->msg_iov[iovec].iov_len; + msg->msg_iov[iovec].iov_len = 0; + + /* If there is more data, read it now and do not wait */ + if (buf != NULL && available_len > 0) { + goto again; + } + + continue; + } + + } else { + res = zsock_recv_stream_immediate(ctx, &buf, &max_len, flags); + recv_len += res; + + if (res == 0) { + if (recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { + return -EAGAIN; + } + } + } + + if (!waitall) { + break; + } + } + + return recv_len; +} + +static ssize_t zsock_recv_stream(struct net_context *ctx, struct msghdr *msg, + void *buf, size_t max_len, int flags) +{ + ssize_t res; + size_t recv_len = 0; + k_timeout_t timeout = K_FOREVER; + + if (!net_context_is_used(ctx)) { + errno = EBADF; + return -1; + } + + if (net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) { + errno = ENOTCONN; + return -1; + } + + if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) { + timeout = K_NO_WAIT; + } else if (!sock_is_eof(ctx) && !sock_is_error(ctx)) { + 
net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL); + } + + if (max_len == 0) { + /* no bytes requested - done! */ + return 0; + } + + res = zsock_recv_stream_timed(ctx, msg, buf, max_len, flags, timeout); + recv_len += MAX(0, res); + + if (res < 0) { + errno = -res; + return -1; + } + + if (!(flags & ZSOCK_MSG_PEEK)) { + net_context_update_recv_wnd(ctx, recv_len); + } + + return recv_len; +} + +ssize_t zsock_recvfrom_ctx(struct net_context *ctx, void *buf, size_t max_len, + int flags, + struct sockaddr *src_addr, socklen_t *addrlen) +{ + enum net_sock_type sock_type = net_context_get_type(ctx); + + if (max_len == 0) { + return 0; + } + + if (sock_type == SOCK_DGRAM) { + return zsock_recv_dgram(ctx, NULL, buf, max_len, flags, src_addr, addrlen); + } else if (sock_type == SOCK_STREAM) { + return zsock_recv_stream(ctx, NULL, buf, max_len, flags); + } + + __ASSERT(0, "Unknown socket type"); + + errno = ENOTSUP; + + return -1; +} + +ssize_t zsock_recvmsg_ctx(struct net_context *ctx, struct msghdr *msg, + int flags) +{ + enum net_sock_type sock_type = net_context_get_type(ctx); + size_t i, max_len = 0; + + if (msg == NULL) { + errno = EINVAL; + return -1; + } + + if (msg->msg_iov == NULL) { + errno = ENOMEM; + return -1; + } + + for (i = 0; i < msg->msg_iovlen; i++) { + max_len += msg->msg_iov[i].iov_len; + } + + if (sock_type == SOCK_DGRAM) { + return zsock_recv_dgram(ctx, msg, NULL, max_len, flags, + msg->msg_name, &msg->msg_namelen); + } else if (sock_type == SOCK_STREAM) { + return zsock_recv_stream(ctx, msg, NULL, max_len, flags); + } + + __ASSERT(0, "Unknown socket type"); + + errno = ENOTSUP; + + return -1; +} + +static int zsock_poll_prepare_ctx(struct net_context *ctx, + struct zsock_pollfd *pfd, + struct k_poll_event **pev, + struct k_poll_event *pev_end) +{ + if (pfd->events & ZSOCK_POLLIN) { + if (*pev == pev_end) { + return -ENOMEM; + } + + (*pev)->obj = &ctx->recv_q; + (*pev)->type = K_POLL_TYPE_FIFO_DATA_AVAILABLE; + (*pev)->mode = 
K_POLL_MODE_NOTIFY_ONLY; + (*pev)->state = K_POLL_STATE_NOT_READY; + (*pev)++; + } + + if (pfd->events & ZSOCK_POLLOUT) { + if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) && + net_context_get_type(ctx) == SOCK_STREAM && + !net_if_is_ip_offloaded(net_context_get_iface(ctx))) { + if (*pev == pev_end) { + return -ENOMEM; + } + + if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) { + (*pev)->obj = net_tcp_conn_sem_get(ctx); + } else { + (*pev)->obj = net_tcp_tx_sem_get(ctx); + } + + (*pev)->type = K_POLL_TYPE_SEM_AVAILABLE; + (*pev)->mode = K_POLL_MODE_NOTIFY_ONLY; + (*pev)->state = K_POLL_STATE_NOT_READY; + (*pev)++; + } else { + return -EALREADY; + } + + } + + /* If socket is already in EOF or error, it can be reported + * immediately, so we tell poll() to short-circuit wait. + */ + if (sock_is_eof(ctx) || sock_is_error(ctx)) { + return -EALREADY; + } + + return 0; +} + +static int zsock_poll_update_ctx(struct net_context *ctx, + struct zsock_pollfd *pfd, + struct k_poll_event **pev) +{ + ARG_UNUSED(ctx); + + if (pfd->events & ZSOCK_POLLIN) { + if ((*pev)->state != K_POLL_STATE_NOT_READY || sock_is_eof(ctx)) { + pfd->revents |= ZSOCK_POLLIN; + } + (*pev)++; + } + if (pfd->events & ZSOCK_POLLOUT) { + if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) && + net_context_get_type(ctx) == SOCK_STREAM && + !net_if_is_ip_offloaded(net_context_get_iface(ctx))) { + if ((*pev)->state != K_POLL_STATE_NOT_READY && + !sock_is_eof(ctx) && + (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED)) { + pfd->revents |= ZSOCK_POLLOUT; + } + (*pev)++; + } else { + pfd->revents |= ZSOCK_POLLOUT; + } + } + + if (sock_is_error(ctx)) { + pfd->revents |= ZSOCK_POLLERR; + } + + if (sock_is_eof(ctx)) { + pfd->revents |= ZSOCK_POLLHUP; + } + + return 0; +} + +static enum tcp_conn_option get_tcp_option(int optname) +{ + switch (optname) { + case TCP_KEEPIDLE: + return TCP_OPT_KEEPIDLE; + case TCP_KEEPINTVL: + return TCP_OPT_KEEPINTVL; + case TCP_KEEPCNT: + return TCP_OPT_KEEPCNT; + } + + return -EINVAL; +} + 
+int zsock_getsockopt_ctx(struct net_context *ctx, int level, int optname, + void *optval, socklen_t *optlen) +{ + int ret; + + switch (level) { + case SOL_SOCKET: + switch (optname) { + case SO_ERROR: { + if (*optlen != sizeof(int)) { + errno = EINVAL; + return -1; + } + + *(int *)optval = POINTER_TO_INT(ctx->user_data); + + return 0; + } + + case SO_TYPE: { + int type = (int)net_context_get_type(ctx); + + if (*optlen != sizeof(type)) { + errno = EINVAL; + return -1; + } + + *(int *)optval = type; + + return 0; + } + + case SO_TXTIME: + if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) { + ret = net_context_get_option(ctx, + NET_OPT_TXTIME, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + break; + + case SO_PROTOCOL: { + int proto = (int)net_context_get_proto(ctx); + + if (*optlen != sizeof(proto)) { + errno = EINVAL; + return -1; + } + + *(int *)optval = proto; + + return 0; + } + + case SO_DOMAIN: { + if (*optlen != sizeof(int)) { + errno = EINVAL; + return -1; + } + + *(int *)optval = net_context_get_family(ctx); + + return 0; + } + + break; + + case SO_RCVBUF: + if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) { + ret = net_context_get_option(ctx, + NET_OPT_RCVBUF, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + break; + + case SO_SNDBUF: + if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) { + ret = net_context_get_option(ctx, + NET_OPT_SNDBUF, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + break; + + case SO_REUSEADDR: + if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) { + ret = net_context_get_option(ctx, + NET_OPT_REUSEADDR, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + break; + + case SO_REUSEPORT: + if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) { + ret = net_context_get_option(ctx, + NET_OPT_REUSEPORT, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + break; + + case 
SO_KEEPALIVE: + if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) && + net_context_get_proto(ctx) == IPPROTO_TCP) { + ret = net_tcp_get_option(ctx, + TCP_OPT_KEEPALIVE, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_TIMESTAMPING: + if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) { + ret = net_context_get_option(ctx, + NET_OPT_TIMESTAMPING, + optval, optlen); + + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + } + + break; + + case IPPROTO_TCP: + switch (optname) { + case TCP_NODELAY: + ret = net_tcp_get_option(ctx, TCP_OPT_NODELAY, optval, optlen); + return ret; + + case TCP_KEEPIDLE: + __fallthrough; + case TCP_KEEPINTVL: + __fallthrough; + case TCP_KEEPCNT: + if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) { + ret = net_tcp_get_option(ctx, + get_tcp_option(optname), + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + } + + break; + + case IPPROTO_IP: + switch (optname) { + case IP_TOS: + if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) { + ret = net_context_get_option(ctx, + NET_OPT_DSCP_ECN, + optval, + optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case IP_TTL: + ret = net_context_get_option(ctx, NET_OPT_TTL, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + + case IP_MULTICAST_TTL: + ret = net_context_get_option(ctx, NET_OPT_MCAST_TTL, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) { + ret = net_context_get_option(ctx, + NET_OPT_IPV6_V6ONLY, + optval, + optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case IPV6_ADDR_PREFERENCES: + if (IS_ENABLED(CONFIG_NET_IPV6)) { + ret = net_context_get_option(ctx, + NET_OPT_ADDR_PREFERENCES, + optval, + optlen); + if 
(ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case IPV6_TCLASS: + if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) { + ret = net_context_get_option(ctx, + NET_OPT_DSCP_ECN, + optval, + optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case IPV6_UNICAST_HOPS: + ret = net_context_get_option(ctx, + NET_OPT_UNICAST_HOP_LIMIT, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + + case IPV6_MULTICAST_HOPS: + ret = net_context_get_option(ctx, + NET_OPT_MCAST_HOP_LIMIT, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + } + + errno = ENOPROTOOPT; + return -1; +} + +static int ipv4_multicast_group(struct net_context *ctx, const void *optval, + socklen_t optlen, bool do_join) +{ + struct ip_mreqn *mreqn; + struct net_if *iface; + int ifindex, ret; + + if (optval == NULL || optlen != sizeof(struct ip_mreqn)) { + errno = EINVAL; + return -1; + } + + mreqn = (struct ip_mreqn *)optval; + + if (mreqn->imr_multiaddr.s_addr == INADDR_ANY) { + errno = EINVAL; + return -1; + } + + if (mreqn->imr_ifindex != 0) { + iface = net_if_get_by_index(mreqn->imr_ifindex); + } else { + ifindex = net_if_ipv4_addr_lookup_by_index(&mreqn->imr_address); + iface = net_if_get_by_index(ifindex); + } + + if (iface == NULL) { + /* Check if ctx has already an interface and if not, + * then select the default interface. 
+ */ + if (ctx->iface <= 0) { + iface = net_if_get_default(); + } else { + iface = net_if_get_by_index(ctx->iface); + } + + if (iface == NULL) { + errno = EINVAL; + return -1; + } + } + + if (do_join) { + ret = net_ipv4_igmp_join(iface, &mreqn->imr_multiaddr, NULL); + } else { + ret = net_ipv4_igmp_leave(iface, &mreqn->imr_multiaddr); + } + + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; +} + +static int ipv6_multicast_group(struct net_context *ctx, const void *optval, + socklen_t optlen, bool do_join) +{ + struct ipv6_mreq *mreq; + struct net_if *iface; + int ret; + + if (optval == NULL || optlen != sizeof(struct ipv6_mreq)) { + errno = EINVAL; + return -1; + } + + mreq = (struct ipv6_mreq *)optval; + + if (memcmp(&mreq->ipv6mr_multiaddr, + net_ipv6_unspecified_address(), + sizeof(mreq->ipv6mr_multiaddr)) == 0) { + errno = EINVAL; + return -1; + } + + iface = net_if_get_by_index(mreq->ipv6mr_ifindex); + if (iface == NULL) { + /* Check if ctx has already an interface and if not, + * then select the default interface. 
+ */ + if (ctx->iface <= 0) { + iface = net_if_get_default(); + } else { + iface = net_if_get_by_index(ctx->iface); + } + + if (iface == NULL) { + errno = ENOENT; + return -1; + } + } + + if (do_join) { + ret = net_ipv6_mld_join(iface, &mreq->ipv6mr_multiaddr); + } else { + ret = net_ipv6_mld_leave(iface, &mreq->ipv6mr_multiaddr); + } + + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; +} + +int zsock_setsockopt_ctx(struct net_context *ctx, int level, int optname, + const void *optval, socklen_t optlen) +{ + int ret; + + switch (level) { + case SOL_SOCKET: + switch (optname) { + case SO_RCVBUF: + if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) { + ret = net_context_set_option(ctx, + NET_OPT_RCVBUF, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_SNDBUF: + if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) { + ret = net_context_set_option(ctx, + NET_OPT_SNDBUF, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_REUSEADDR: + if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) { + ret = net_context_set_option(ctx, + NET_OPT_REUSEADDR, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_REUSEPORT: + if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) { + ret = net_context_set_option(ctx, + NET_OPT_REUSEPORT, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_PRIORITY: + if (IS_ENABLED(CONFIG_NET_CONTEXT_PRIORITY)) { + ret = net_context_set_option(ctx, + NET_OPT_PRIORITY, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_RCVTIMEO: + if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVTIMEO)) { + const struct zsock_timeval *tv = optval; + k_timeout_t timeout; + + if (optlen != sizeof(struct zsock_timeval)) { + errno = EINVAL; + return -1; + } + + if (tv->tv_sec == 0 && tv->tv_usec == 0) { + timeout = 
K_FOREVER; + } else { + timeout = K_USEC(tv->tv_sec * 1000000ULL + + tv->tv_usec); + } + + ret = net_context_set_option(ctx, + NET_OPT_RCVTIMEO, + &timeout, + sizeof(timeout)); + + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_SNDTIMEO: + if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDTIMEO)) { + const struct zsock_timeval *tv = optval; + k_timeout_t timeout; + + if (optlen != sizeof(struct zsock_timeval)) { + errno = EINVAL; + return -1; + } + + if (tv->tv_sec == 0 && tv->tv_usec == 0) { + timeout = K_FOREVER; + } else { + timeout = K_USEC(tv->tv_sec * 1000000ULL + + tv->tv_usec); + } + + ret = net_context_set_option(ctx, + NET_OPT_SNDTIMEO, + &timeout, + sizeof(timeout)); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_TXTIME: + if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) { + ret = net_context_set_option(ctx, + NET_OPT_TXTIME, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_SOCKS5: + if (IS_ENABLED(CONFIG_SOCKS)) { + ret = net_context_set_option(ctx, + NET_OPT_SOCKS5, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + net_context_set_proxy_enabled(ctx, true); + + return 0; + } + + break; + + case SO_BINDTODEVICE: { + struct net_if *iface; + const struct ifreq *ifreq = optval; + + if (net_context_get_family(ctx) != AF_INET && + net_context_get_family(ctx) != AF_INET6) { + errno = EAFNOSUPPORT; + return -1; + } + + /* optlen equal to 0 or empty interface name should + * remove the binding. 
+ */ + if ((optlen == 0) || (ifreq != NULL && + strlen(ifreq->ifr_name) == 0)) { + ctx->flags &= ~NET_CONTEXT_BOUND_TO_IFACE; + return 0; + } + + if ((ifreq == NULL) || (optlen != sizeof(*ifreq))) { + errno = EINVAL; + return -1; + } + + if (IS_ENABLED(CONFIG_NET_INTERFACE_NAME)) { + ret = net_if_get_by_name(ifreq->ifr_name); + if (ret < 0) { + errno = -ret; + return -1; + } + + iface = net_if_get_by_index(ret); + if (iface == NULL) { + errno = ENODEV; + return -1; + } + } else { + const struct device *dev; + + dev = device_get_binding(ifreq->ifr_name); + if (dev == NULL) { + errno = ENODEV; + return -1; + } + + iface = net_if_lookup_by_dev(dev); + if (iface == NULL) { + errno = ENODEV; + return -1; + } + } + + net_context_bind_iface(ctx, iface); + + return 0; + } + + case SO_LINGER: + /* ignored. for compatibility purposes only */ + return 0; + + case SO_KEEPALIVE: + if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) && + net_context_get_proto(ctx) == IPPROTO_TCP) { + ret = net_tcp_set_option(ctx, + TCP_OPT_KEEPALIVE, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case SO_TIMESTAMPING: + if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) { + ret = net_context_set_option(ctx, + NET_OPT_TIMESTAMPING, + optval, optlen); + + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + } + + break; + + case IPPROTO_TCP: + switch (optname) { + case TCP_NODELAY: + ret = net_tcp_set_option(ctx, + TCP_OPT_NODELAY, optval, optlen); + return ret; + + case TCP_KEEPIDLE: + __fallthrough; + case TCP_KEEPINTVL: + __fallthrough; + case TCP_KEEPCNT: + if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) { + ret = net_tcp_set_option(ctx, + get_tcp_option(optname), + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + } + break; + + case IPPROTO_IP: + switch (optname) { + case IP_TOS: + if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) { + ret = net_context_set_option(ctx, + NET_OPT_DSCP_ECN, 
+ optval, + optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case IP_PKTINFO: + if (IS_ENABLED(CONFIG_NET_IPV4) && + IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) { + ret = net_context_set_option(ctx, + NET_OPT_RECV_PKTINFO, + optval, + optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case IP_MULTICAST_TTL: + ret = net_context_set_option(ctx, NET_OPT_MCAST_TTL, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + + case IP_TTL: + ret = net_context_set_option(ctx, NET_OPT_TTL, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + + case IP_ADD_MEMBERSHIP: + if (IS_ENABLED(CONFIG_NET_IPV4)) { + return ipv4_multicast_group(ctx, optval, + optlen, true); + } + + break; + + case IP_DROP_MEMBERSHIP: + if (IS_ENABLED(CONFIG_NET_IPV4)) { + return ipv4_multicast_group(ctx, optval, + optlen, false); + } + + break; + } + + break; + + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) { + ret = net_context_set_option(ctx, + NET_OPT_IPV6_V6ONLY, + optval, + optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + } + + return 0; + + case IPV6_RECVPKTINFO: + if (IS_ENABLED(CONFIG_NET_IPV6) && + IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) { + ret = net_context_set_option(ctx, + NET_OPT_RECV_PKTINFO, + optval, + optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case IPV6_ADDR_PREFERENCES: + if (IS_ENABLED(CONFIG_NET_IPV6)) { + ret = net_context_set_option(ctx, + NET_OPT_ADDR_PREFERENCES, + optval, + optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case IPV6_TCLASS: + if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) { + ret = net_context_set_option(ctx, + NET_OPT_DSCP_ECN, + optval, + optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + } + + break; + + case 
IPV6_UNICAST_HOPS: + ret = net_context_set_option(ctx, + NET_OPT_UNICAST_HOP_LIMIT, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + + case IPV6_MULTICAST_HOPS: + ret = net_context_set_option(ctx, + NET_OPT_MCAST_HOP_LIMIT, + optval, optlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + return 0; + + case IPV6_ADD_MEMBERSHIP: + if (IS_ENABLED(CONFIG_NET_IPV6)) { + return ipv6_multicast_group(ctx, optval, + optlen, true); + } + + break; + + case IPV6_DROP_MEMBERSHIP: + if (IS_ENABLED(CONFIG_NET_IPV6)) { + return ipv6_multicast_group(ctx, optval, + optlen, false); + } + + break; + } + + break; + } + + errno = ENOPROTOOPT; + return -1; +} + +int zsock_getpeername_ctx(struct net_context *ctx, struct sockaddr *addr, + socklen_t *addrlen) +{ + socklen_t newlen = 0; + + if (addr == NULL || addrlen == NULL) { + errno = EINVAL; + return -1; + } + + if (!(ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET)) { + errno = ENOTCONN; + return -1; + } + + if (net_context_get_type(ctx) == SOCK_STREAM && + net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) { + errno = ENOTCONN; + return -1; + } + + if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->remote.sa_family == AF_INET) { + struct sockaddr_in addr4 = { 0 }; + + addr4.sin_family = AF_INET; + addr4.sin_port = net_sin(&ctx->remote)->sin_port; + memcpy(&addr4.sin_addr, &net_sin(&ctx->remote)->sin_addr, + sizeof(struct in_addr)); + newlen = sizeof(struct sockaddr_in); + + memcpy(addr, &addr4, MIN(*addrlen, newlen)); + } else if (IS_ENABLED(CONFIG_NET_IPV6) && + ctx->remote.sa_family == AF_INET6) { + struct sockaddr_in6 addr6 = { 0 }; + + addr6.sin6_family = AF_INET6; + addr6.sin6_port = net_sin6(&ctx->remote)->sin6_port; + memcpy(&addr6.sin6_addr, &net_sin6(&ctx->remote)->sin6_addr, + sizeof(struct in6_addr)); + newlen = sizeof(struct sockaddr_in6); + + memcpy(addr, &addr6, MIN(*addrlen, newlen)); + } else { + errno = EINVAL; + return -1; + } + + *addrlen = newlen; + + return 0; +} + +int 
zsock_getsockname_ctx(struct net_context *ctx, struct sockaddr *addr, + socklen_t *addrlen) +{ + socklen_t newlen = 0; + int ret; + + if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->local.family == AF_INET) { + struct sockaddr_in addr4 = { 0 }; + + if (net_sin_ptr(&ctx->local)->sin_addr == NULL) { + errno = EINVAL; + return -1; + } + + newlen = sizeof(struct sockaddr_in); + + ret = net_context_get_local_addr(ctx, + (struct sockaddr *)&addr4, + &newlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + memcpy(addr, &addr4, MIN(*addrlen, newlen)); + + } else if (IS_ENABLED(CONFIG_NET_IPV6) && ctx->local.family == AF_INET6) { + struct sockaddr_in6 addr6 = { 0 }; + + if (net_sin6_ptr(&ctx->local)->sin6_addr == NULL) { + errno = EINVAL; + return -1; + } + + newlen = sizeof(struct sockaddr_in6); + + ret = net_context_get_local_addr(ctx, + (struct sockaddr *)&addr6, + &newlen); + if (ret < 0) { + errno = -ret; + return -1; + } + + memcpy(addr, &addr6, MIN(*addrlen, newlen)); + } else { + errno = EINVAL; + return -1; + } + + *addrlen = newlen; + + return 0; +} + +static ssize_t sock_read_vmeth(void *obj, void *buffer, size_t count) +{ + return zsock_recvfrom_ctx(obj, buffer, count, 0, NULL, 0); +} + +static ssize_t sock_write_vmeth(void *obj, const void *buffer, size_t count) +{ + return zsock_sendto_ctx(obj, buffer, count, 0, NULL, 0); +} + +static void zsock_ctx_set_lock(struct net_context *ctx, struct k_mutex *lock) +{ + ctx->cond.lock = lock; +} + +static int sock_ioctl_vmeth(void *obj, unsigned int request, va_list args) +{ + switch (request) { + + /* In Zephyr, fcntl() is just an alias of ioctl(). 
*/ + case F_GETFL: + if (sock_is_nonblock(obj)) { + return O_NONBLOCK; + } + + return 0; + + case F_SETFL: { + int flags; + + flags = va_arg(args, int); + + if (flags & O_NONBLOCK) { + sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK); + } else { + sock_set_flag(obj, SOCK_NONBLOCK, 0); + } + + return 0; + } + + case ZFD_IOCTL_POLL_PREPARE: { + struct zsock_pollfd *pfd; + struct k_poll_event **pev; + struct k_poll_event *pev_end; + + pfd = va_arg(args, struct zsock_pollfd *); + pev = va_arg(args, struct k_poll_event **); + pev_end = va_arg(args, struct k_poll_event *); + + return zsock_poll_prepare_ctx(obj, pfd, pev, pev_end); + } + + case ZFD_IOCTL_POLL_UPDATE: { + struct zsock_pollfd *pfd; + struct k_poll_event **pev; + + pfd = va_arg(args, struct zsock_pollfd *); + pev = va_arg(args, struct k_poll_event **); + + return zsock_poll_update_ctx(obj, pfd, pev); + } + + case ZFD_IOCTL_SET_LOCK: { + struct k_mutex *lock; + + lock = va_arg(args, struct k_mutex *); + + zsock_ctx_set_lock(obj, lock); + return 0; + } + + case ZFD_IOCTL_FIONBIO: + sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK); + return 0; + + case ZFD_IOCTL_FIONREAD: { + int *avail = va_arg(args, int *); + + *avail = zsock_fionread_ctx(obj); + return 0; + } + + default: + errno = EOPNOTSUPP; + return -1; + } +} + +static int sock_shutdown_vmeth(void *obj, int how) +{ + return zsock_shutdown_ctx(obj, how); +} + +static int sock_bind_vmeth(void *obj, const struct sockaddr *addr, + socklen_t addrlen) +{ + return zsock_bind_ctx(obj, addr, addrlen); +} + +static int sock_connect_vmeth(void *obj, const struct sockaddr *addr, + socklen_t addrlen) +{ + return zsock_connect_ctx(obj, addr, addrlen); +} + +static int sock_listen_vmeth(void *obj, int backlog) +{ + return zsock_listen_ctx(obj, backlog); +} + +static int sock_accept_vmeth(void *obj, struct sockaddr *addr, + socklen_t *addrlen) +{ + return zsock_accept_ctx(obj, addr, addrlen); +} + +static ssize_t sock_sendto_vmeth(void *obj, const void *buf, size_t len, 
+ int flags, const struct sockaddr *dest_addr, + socklen_t addrlen) +{ + return zsock_sendto_ctx(obj, buf, len, flags, dest_addr, addrlen); +} + +static ssize_t sock_sendmsg_vmeth(void *obj, const struct msghdr *msg, + int flags) +{ + return zsock_sendmsg_ctx(obj, msg, flags); +} + +static ssize_t sock_recvmsg_vmeth(void *obj, struct msghdr *msg, int flags) +{ + return zsock_recvmsg_ctx(obj, msg, flags); +} + +static ssize_t sock_recvfrom_vmeth(void *obj, void *buf, size_t max_len, + int flags, struct sockaddr *src_addr, + socklen_t *addrlen) +{ + return zsock_recvfrom_ctx(obj, buf, max_len, flags, + src_addr, addrlen); +} + +static int sock_getsockopt_vmeth(void *obj, int level, int optname, + void *optval, socklen_t *optlen) +{ + return zsock_getsockopt_ctx(obj, level, optname, optval, optlen); +} + +static int sock_setsockopt_vmeth(void *obj, int level, int optname, + const void *optval, socklen_t optlen) +{ + return zsock_setsockopt_ctx(obj, level, optname, optval, optlen); +} + +static int sock_close_vmeth(void *obj) +{ + return zsock_close_ctx(obj); +} +static int sock_getpeername_vmeth(void *obj, struct sockaddr *addr, + socklen_t *addrlen) +{ + return zsock_getpeername_ctx(obj, addr, addrlen); +} + +static int sock_getsockname_vmeth(void *obj, struct sockaddr *addr, + socklen_t *addrlen) +{ + return zsock_getsockname_ctx(obj, addr, addrlen); +} + +const struct socket_op_vtable sock_fd_op_vtable = { + .fd_vtable = { + .read = sock_read_vmeth, + .write = sock_write_vmeth, + .close = sock_close_vmeth, + .ioctl = sock_ioctl_vmeth, + }, + .shutdown = sock_shutdown_vmeth, + .bind = sock_bind_vmeth, + .connect = sock_connect_vmeth, + .listen = sock_listen_vmeth, + .accept = sock_accept_vmeth, + .sendto = sock_sendto_vmeth, + .sendmsg = sock_sendmsg_vmeth, + .recvmsg = sock_recvmsg_vmeth, + .recvfrom = sock_recvfrom_vmeth, + .getsockopt = sock_getsockopt_vmeth, + .setsockopt = sock_setsockopt_vmeth, + .getpeername = sock_getpeername_vmeth, + .getsockname = 
sock_getsockname_vmeth, +}; + +static bool inet_is_supported(int family, int type, int proto) +{ + if (family != AF_INET && family != AF_INET6) { + return false; + } + + return true; +} + +NET_SOCKET_REGISTER(af_inet46, NET_SOCKET_DEFAULT_PRIO, AF_UNSPEC, + inet_is_supported, zsock_socket_internal); diff --git a/subsys/net/lib/zperf/zperf_udp_receiver.c b/subsys/net/lib/zperf/zperf_udp_receiver.c index 0f7e1e4f077c..2f0d292f7ad0 100644 --- a/subsys/net/lib/zperf/zperf_udp_receiver.c +++ b/subsys/net/lib/zperf/zperf_udp_receiver.c @@ -12,6 +12,7 @@ LOG_MODULE_DECLARE(net_zperf, CONFIG_NET_ZPERF_LOG_LEVEL); #include +#include #include #include #include diff --git a/tests/net/ipv6/src/main.c b/tests/net/ipv6/src/main.c index 02df9ac650c6..1dd40afcde8c 100644 --- a/tests/net/ipv6/src/main.c +++ b/tests/net/ipv6/src/main.c @@ -19,6 +19,7 @@ LOG_MODULE_REGISTER(net_test, CONFIG_NET_IPV6_LOG_LEVEL); #include +#include #include #include #include diff --git a/tests/net/mld/src/main.c b/tests/net/mld/src/main.c index e1bcce056cfa..1c95fe5409dc 100644 --- a/tests/net/mld/src/main.c +++ b/tests/net/mld/src/main.c @@ -18,6 +18,7 @@ LOG_MODULE_REGISTER(net_test, CONFIG_NET_IPV6_LOG_LEVEL); #include +#include #include #include #include diff --git a/tests/net/offloaded_netdev/prj.conf b/tests/net/offloaded_netdev/prj.conf index ad92b9bec282..3a670f327d3d 100644 --- a/tests/net/offloaded_netdev/prj.conf +++ b/tests/net/offloaded_netdev/prj.conf @@ -7,3 +7,9 @@ CONFIG_ZTEST=y CONFIG_TEST_USERSPACE=y CONFIG_NET_OFFLOAD=y CONFIG_TEST_RANDOM_GENERATOR=y +CONFIG_NET_IPV4=y +CONFIG_NET_IPV6=y +CONFIG_NET_IPV6_NBR_CACHE=n +CONFIG_NET_IPV6_MLD=n +CONFIG_NET_IF_MAX_IPV4_COUNT=4 +CONFIG_NET_IF_MAX_IPV6_COUNT=4 diff --git a/tests/net/offloaded_netdev/src/main.c b/tests/net/offloaded_netdev/src/main.c index dfe8f8fbb14e..becc0488102f 100644 --- a/tests/net/offloaded_netdev/src/main.c +++ b/tests/net/offloaded_netdev/src/main.c @@ -19,6 +19,11 @@ #include #include +static struct in_addr 
test_addr_ipv4 = { { { 192, 0, 2, 1 } } }; +static struct in6_addr test_addr_ipv6 = { { { + 0x20, 0x01, 0x0d, 0xb8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x1 +} } }; + /* Dummy socket creator for socket-offloaded ifaces */ int offload_socket(int family, int type, int proto) { @@ -36,6 +41,8 @@ static void sock_offload_l2_iface_init(struct net_if *iface) */ net_if_socket_offload_set(iface, offload_socket); net_if_flag_set(iface, NET_IF_NO_AUTO_START); + net_if_flag_set(iface, NET_IF_IPV4); + net_if_flag_set(iface, NET_IF_IPV6); } /* Dummy init function for net-offloaded ifaces */ @@ -46,6 +53,8 @@ static void net_offload_l2_iface_init(struct net_if *iface) */ iface->if_dev->offload = &net_offload_api; net_if_flag_set(iface, NET_IF_NO_AUTO_START); + net_if_flag_set(iface, NET_IF_IPV4); + net_if_flag_set(iface, NET_IF_IPV6); } /* Tracks the total number of ifaces that are up (theoretically). */ @@ -327,5 +336,58 @@ ZTEST(net_offloaded_netdev, test_up_down_sock_off_impl_fail_down) "Iface under test should have failed to go up"); } +static void test_addr_add_common(struct net_if *test_iface, const char *off_type) +{ + struct net_if *lookup_iface; + struct net_if_addr *ipv4_addr; + struct net_if_addr *ipv6_addr; + + /* Bring iface up before test */ + (void)net_if_up(test_iface); + + ipv4_addr = net_if_ipv4_addr_add(test_iface, &test_addr_ipv4, + NET_ADDR_MANUAL, 0); + zassert_not_null(ipv4_addr, + "Failed to add IPv4 address to a %s offloaded interface", + off_type); + ipv6_addr = net_if_ipv6_addr_add(test_iface, &test_addr_ipv6, + NET_ADDR_MANUAL, 0); + zassert_not_null(ipv6_addr, + "Failed to add IPv6 address to a socket %s interface", + off_type); + + lookup_iface = NULL; + zassert_equal_ptr(net_if_ipv4_addr_lookup(&test_addr_ipv4, &lookup_iface), + ipv4_addr, + "Failed to find IPv4 address on a %s offloaded interface"); + zassert_equal_ptr(lookup_iface, test_iface, "Wrong interface"); + + lookup_iface = NULL; + zassert_equal_ptr(net_if_ipv6_addr_lookup(&test_addr_ipv6, 
&lookup_iface), + ipv6_addr, + "Failed to find IPv6 address on a %s offloaded interface"); + zassert_equal_ptr(lookup_iface, test_iface, "Wrong interface"); + + zassert_true(net_if_ipv4_addr_rm(test_iface, &test_addr_ipv4), + "Failed to remove IPv4 address from a %s offloaded interface", + off_type); + zassert_true(net_if_ipv6_addr_rm(test_iface, &test_addr_ipv6), + "Failed to remove IPv6 address from a %s offloaded interface", + off_type); +} + +ZTEST(net_offloaded_netdev, test_addr_add_sock_off_impl) +{ + struct net_if *test_iface = NET_IF_GET(sock_offload_test_impl, 0); + + test_addr_add_common(test_iface, "offloaded"); +} + +ZTEST(net_offloaded_netdev, test_addr_add_net_off_impl) +{ + struct net_if *test_iface = NET_IF_GET(net_offload_test_impl, 0); + + test_addr_add_common(test_iface, "net"); +} ZTEST_SUITE(net_offloaded_netdev, NULL, NULL, net_offloaded_netdev_before, NULL, NULL); diff --git a/tests/net/offloaded_netdev/testcase.yaml b/tests/net/offloaded_netdev/testcase.yaml index 5893d4f12c59..d51590043f9b 100644 --- a/tests/net/offloaded_netdev/testcase.yaml +++ b/tests/net/offloaded_netdev/testcase.yaml @@ -1,9 +1,12 @@ common: min_ram: 16 depends_on: netif + tags: + - net + - iface + - userspace tests: - net.offloaded_netdev: - tags: - - net - - iface - - userspace + net.offloaded_netdev: {} + net.offloaded_netdev.no_native: + extra_configs: + - CONFIG_NET_NATIVE=n