diff --git a/doc/api/networking.rst b/doc/api/networking.rst index 042dad3ff995a..aff30a4049206 100644 --- a/doc/api/networking.rst +++ b/doc/api/networking.rst @@ -108,6 +108,12 @@ Hostname Configuration Library .. doxygengroup:: net_hostname :project: Zephyr +generic Precision Time Protocol (gPTP) +************************************** + +.. doxygengroup:: gptp + :project: Zephyr + Network technologies ******************** diff --git a/doc/subsystems/networking/gptp_release_notes.rst b/doc/subsystems/networking/gptp_release_notes.rst new file mode 100644 index 0000000000000..50d96024371b8 --- /dev/null +++ b/doc/subsystems/networking/gptp_release_notes.rst @@ -0,0 +1,55 @@ +gPTP stack for Zephyr +##################### + +Overview +******** + +This gPTP stack supports the protocol and procedures as defined in +the IEEE 802.1AS-2011 standard (Timing and Synchronization for +Time-Sensitive Applications in Bridged Local Area Networks). + +Supported features +******************* + +The stack handles communications and state machines defined in the standard +IEEE802.1AS-2011. Mandatory requirements, for a full-duplex point-to-point link +endpoint, as defined in Annex A of the standard are supported. + +The stack is in principle capable of handling communications on multiple network +interfaces (also defined as "ports" in the standard) and thus acts as +an 802.1AS bridge. However, this mode of operation has not been validated. + +Supported hardware +****************** + +Although the stack itself is hardware independent, ethernet frame timestamping +support must be enabled in ethernet drivers. 
+ + Boards supported: + - NXP FRDM-K64F + - QEMU (emulated, limited capabilities due to lack of hardware clock) + + Enabling the stack + ****************** + + In menuconfig, the following configuration must be enabled: + - CONFIG_NET_GPTP (Networking -> Link layer and IP networking support -> IP stack -> Link layer options -> Enable IEEE802.1AS support) + + Application interfaces + ********************** + + Only two Application Interfaces as defined in section 9 of the standard + are available: + + - ClockTargetPhaseDiscontinuity interface (gptp_register_phase_dis_cb) + - ClockTargetEventCapture interface (gptp_event_capture) + + Function prototypes can be found in "include/net/gptp.h". + + Testing + ******* + + The stack has been informally tested using the OpenAVB gPTP and +Linux ptp4l daemons. diff --git a/drivers/CMakeLists.txt b/drivers/CMakeLists.txt index 4cfb68b502c22..1ed7d43b26cf0 100644 --- a/drivers/CMakeLists.txt +++ b/drivers/CMakeLists.txt @@ -31,3 +31,4 @@ add_subdirectory_ifdef(CONFIG_NETWORKING net) add_subdirectory_ifdef(CONFIG_NET_L2_ETHERNET ethernet) add_subdirectory_ifdef(CONFIG_ENTROPY_GENERATOR entropy) add_subdirectory_ifdef(CONFIG_SYS_CLOCK_EXISTS timer) +add_subdirectory_ifdef(CONFIG_NET_GPTP ptp_clock) diff --git a/drivers/Kconfig b/drivers/Kconfig index 0cd0c65746385..2a4006799302c 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -73,4 +73,6 @@ source "drivers/display/Kconfig" source "drivers/led_strip/Kconfig" +source "drivers/ptp_clock/Kconfig" + endmenu diff --git a/drivers/ethernet/CMakeLists.txt b/drivers/ethernet/CMakeLists.txt index bc780af00b999..1498a0b5dc4a8 100644 --- a/drivers/ethernet/CMakeLists.txt +++ b/drivers/ethernet/CMakeLists.txt @@ -1,3 +1,5 @@ +zephyr_library_include_directories(${ZEPHYR_BASE}/subsys/net/ip/l2) + zephyr_sources_ifdef(CONFIG_ETH_SAM_GMAC eth_sam_gmac.c phy_sam_gmac.c diff --git a/drivers/ethernet/Kconfig.mcux b/drivers/ethernet/Kconfig.mcux index 0b9a271e6ebd0..54fd7e1361ef8 100644 --- 
a/drivers/ethernet/Kconfig.mcux +++ b/drivers/ethernet/Kconfig.mcux @@ -100,4 +100,39 @@ config ETH_MCUX_0_MAC5 help This is the byte 5 of the MAC address. endif + +config PTP_CLOCK_MCUX + bool "MCUX PTP clock driver support" + default n + default y if NET_GPTP + select PTP_CLOCK + depends on NET_GPTP + help + Enable MCUX PTP clock support. + +if PTP_CLOCK_MCUX + +config ETH_MCUX_PTP_RX_BUFFERS + int "Size of the RX timestamp ring buffer" + default 10 + help + Set the number of RX buffers provided to the MCUX driver + to store timestamps. + +config ETH_MCUX_PTP_TX_BUFFERS + int "Size of the TX timestamp ring buffer" + default 10 + help + Set the number of TX buffers provided to the MCUX driver + to store timestamps. + +config ETH_MCUX_PTP_CLOCK_SRC_HZ + int "Frequency of the clock source for the PTP timer" + default 50000000 + help + Set the frequency in Hz sourced to the PTP timer. + If the value is set properly, the timer will be accurate. + +endif # PTP_CLOCK_MCUX + endif diff --git a/drivers/ethernet/Kconfig.native_posix b/drivers/ethernet/Kconfig.native_posix index da727468ca20e..3cfbb187e2b21 100644 --- a/drivers/ethernet/Kconfig.native_posix +++ b/drivers/ethernet/Kconfig.native_posix @@ -43,6 +43,14 @@ config ETH_NATIVE_POSIX_DEV_NAME help This option sets the TUN/TAP device name in your host system. +config ETH_NATIVE_POSIX_PTP_CLOCK + bool "PTP clock driver support" + default n + select PTP_CLOCK + depends on NET_GPTP + help + Enable PTP clock support. 
+ config ETH_NATIVE_POSIX_RANDOM_MAC bool "Random MAC address" depends on ENTROPY_GENERATOR diff --git a/drivers/ethernet/eth_dw.c b/drivers/ethernet/eth_dw.c index c74780b19e700..8d83fef7ae6f7 100644 --- a/drivers/ethernet/eth_dw.c +++ b/drivers/ethernet/eth_dw.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -327,6 +328,8 @@ static void eth_initialize(struct net_if *iface) if (r < 0) { SYS_LOG_ERR("Could not initialize ethernet device: %d", r); } + + ethernet_init(iface); } /* Bindings to the plaform */ diff --git a/drivers/ethernet/eth_enc28j60.c b/drivers/ethernet/eth_enc28j60.c index 5e20ad636a733..b104ad5b34494 100644 --- a/drivers/ethernet/eth_enc28j60.c +++ b/drivers/ethernet/eth_enc28j60.c @@ -703,6 +703,8 @@ static void eth_enc28j60_iface_init_0(struct net_if *iface) net_if_set_link_addr(iface, mac_address_0, sizeof(mac_address_0), NET_LINK_ETHERNET); context->iface = iface; + + ethernet_init(iface); } static const struct ethernet_api api_funcs_0 = { diff --git a/drivers/ethernet/eth_mcux.c b/drivers/ethernet/eth_mcux.c index 36603a7ae2680..179f07f16a38e 100644 --- a/drivers/ethernet/eth_mcux.c +++ b/drivers/ethernet/eth_mcux.c @@ -2,6 +2,7 @@ * * Copyright (c) 2016-2017 ARM Ltd * Copyright (c) 2016 Linaro Ltd + * Copyright (c) 2018 Intel Coporation * * SPDX-License-Identifier: Apache-2.0 */ @@ -24,6 +25,11 @@ #include #include +#if defined(CONFIG_PTP_CLOCK_MCUX) +#include +#include +#endif + #include "fsl_enet.h" #include "fsl_phy.h" #include "fsl_port.h" @@ -59,9 +65,23 @@ static const char *phy_state_name(enum eth_mcux_phy_state state) return name[state]; } -struct eth_context { +struct ifaces { struct net_if *iface; + u16_t vlan_tag; +}; + +struct eth_context { + /* If VLAN is enabled, there can be multiple VLAN interfaces related to + * this physical device. Interface index is used as an index to this + * array. If VLAN support is disabled, then there is only one element + * in this array. 
+ */ + struct ifaces ifaces[NET_VLAN_MAX_COUNT]; enet_handle_t enet_handle; +#if defined(CONFIG_PTP_CLOCK_MCUX) + enet_ptp_config_t ptp_config; + float clk_ratio; +#endif struct k_sem tx_buf_sem; enum eth_mcux_phy_state phy_state; bool enabled; @@ -97,11 +117,25 @@ rx_buffer_desc[CONFIG_ETH_MCUX_RX_BUFFERS]; static enet_tx_bd_struct_t __aligned(ENET_BUFF_ALIGNMENT) tx_buffer_desc[CONFIG_ETH_MCUX_TX_BUFFERS]; +#if defined(CONFIG_NET_PKT_TIMESTAMP) +/* Packets to be timestamped. */ +static struct net_pkt *ts_tx_pkt[CONFIG_ETH_MCUX_TX_BUFFERS]; +static int ts_tx_rd, ts_tx_wr; +#endif + /* Use ENET_FRAME_MAX_VALNFRAMELEN for VLAN frame size * Use ENET_FRAME_MAX_FRAMELEN for ethernet frame size */ +#if defined(CONFIG_NET_VLAN) +#if !defined(ENET_FRAME_MAX_VALNFRAMELEN) +#define ENET_FRAME_MAX_VALNFRAMELEN (ENET_FRAME_MAX_FRAMELEN + 4) +#endif +#define ETH_MCUX_BUFFER_SIZE \ + ROUND_UP(ENET_FRAME_MAX_VALNFRAMELEN, ENET_BUFF_ALIGNMENT) +#else #define ETH_MCUX_BUFFER_SIZE \ ROUND_UP(ENET_FRAME_MAX_FRAMELEN, ENET_BUFF_ALIGNMENT) +#endif /* CONFIG_NET_VLAN */ static u8_t __aligned(ENET_BUFF_ALIGNMENT) rx_buffer[CONFIG_ETH_MCUX_RX_BUFFERS][ETH_MCUX_BUFFER_SIZE]; @@ -133,6 +167,25 @@ static void eth_mcux_decode_duplex_and_speed(u32_t status, } } +static struct net_if *get_iface(struct eth_context *ctx, u16_t vlan_tag) +{ +#if defined(CONFIG_NET_VLAN) + int i; + + for (i = 0; i < ARRAY_SIZE(ctx->ifaces); i++) { + if (ctx->ifaces[i].vlan_tag == vlan_tag) { + return ctx->ifaces[i].iface; + } + } + + return NULL; +#else + ARG_UNUSED(vlan_tag); + + return ctx->ifaces[0].iface; +#endif +} + static void eth_mcux_phy_enter_reset(struct eth_context *context) { const u32_t phy_addr = 0; @@ -310,6 +363,83 @@ static void eth_mcux_delayed_phy_work(struct k_work *item) eth_mcux_phy_event(context); } +#if defined(CONFIG_PTP_CLOCK_MCUX) +static enet_ptp_time_data_t ptp_rx_buffer[CONFIG_ETH_MCUX_PTP_RX_BUFFERS]; +static enet_ptp_time_data_t ptp_tx_buffer[CONFIG_ETH_MCUX_PTP_TX_BUFFERS]; + 
+static bool eth_get_ptp_data(struct net_if *iface, struct net_pkt *pkt, + enet_ptp_time_data_t *ptpTsData) +{ + struct gptp_hdr *hdr; + +#if defined(CONFIG_NET_VLAN) + struct net_eth_vlan_hdr *hdr_vlan; + struct ethernet_context *eth_ctx; + bool vlan_enabled = false; + + eth_ctx = net_if_l2_data(iface); + if (net_eth_is_vlan_enabled(eth_ctx, iface)) { + hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + vlan_enabled = true; + + if (ntohs(hdr_vlan->type) != NET_ETH_PTYPE_PTP) { + return false; + } + } else +#endif + { + if (ntohs(NET_ETH_HDR(pkt)->type) != NET_ETH_PTYPE_PTP) { + return false; + } + } + + net_pkt_set_priority(pkt, NET_PRIORITY_CA); + + if (ptpTsData) { + + /* Cannot use GPTP_HDR as net_pkt fields are not all filled */ + +#if defined(CONFIG_NET_VLAN) + if (vlan_enabled) { + hdr = (struct gptp_hdr *)((u8_t *)net_pkt_ll(pkt) + + sizeof(struct net_eth_vlan_hdr)); + } else +#endif + { + hdr = (struct gptp_hdr *)((u8_t *)net_pkt_ll(pkt) + + sizeof(struct net_eth_hdr)); + } + + ptpTsData->version = hdr->ptp_version; + memcpy(ptpTsData->sourcePortId, &hdr->port_id, + kENET_PtpSrcPortIdLen); + ptpTsData->messageType = hdr->message_type; + ptpTsData->sequenceId = ntohs(hdr->sequence_id); + +#ifdef CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG + SYS_LOG_DBG("PTP packet: ver %d type %d len %d " + "clk %02x%02x%02x%02x%02x%02x%02x%02x port %d " + "seq %d", + ptpTsData->version, + ptpTsData->messageType, + ntohs(hdr->message_length), + hdr->port_id.clk_id[0], + hdr->port_id.clk_id[1], + hdr->port_id.clk_id[2], + hdr->port_id.clk_id[3], + hdr->port_id.clk_id[4], + hdr->port_id.clk_id[5], + hdr->port_id.clk_id[6], + hdr->port_id.clk_id[7], + ntohs(hdr->port_id.port_number), + ptpTsData->sequenceId); +#endif + } + + return true; +} +#endif /* CONFIG_PTP_CLOCK_MCUX */ + static int eth_tx(struct net_if *iface, struct net_pkt *pkt) { struct eth_context *context = net_if_get_device(iface)->driver_data; @@ -317,6 +447,9 @@ static int eth_tx(struct net_if *iface, struct net_pkt 
*pkt) u8_t *dst; status_t status; unsigned int imask; +#if defined(CONFIG_NET_PKT_TIMESTAMP) + bool timestamped_frame; +#endif u16_t total_len = net_pkt_ll_reserve(pkt) + net_pkt_get_len(pkt); @@ -345,9 +478,35 @@ static int eth_tx(struct net_if *iface, struct net_pkt *pkt) frag = frag->frags; } + /* FIXME: Dirty workaround. + * With current implementation of ENET_StoreTxFrameTime in the MCUX + * library, a frame may not be timestamped when a non-timestamped frame + * is sent. + */ +#ifdef ENET_ENHANCEDBUFFERDESCRIPTOR_MODE + context->enet_handle.txBdDirtyTime[0] = + context->enet_handle.txBdCurrent[0]; +#endif + status = ENET_SendFrame(ENET, &context->enet_handle, context->frame_buf, total_len); +#if defined(CONFIG_NET_PKT_TIMESTAMP) + timestamped_frame = eth_get_ptp_data(iface, pkt, NULL); + if (timestamped_frame) { + if (!status) { + ts_tx_pkt[ts_tx_wr] = net_pkt_ref(pkt); + } else { + ts_tx_pkt[ts_tx_wr] = NULL; + } + + ts_tx_wr++; + if (ts_tx_wr >= CONFIG_ETH_MCUX_TX_BUFFERS) { + ts_tx_wr = 0; + } + } +#endif + irq_unlock(imask); if (status) { @@ -356,6 +515,7 @@ static int eth_tx(struct net_if *iface, struct net_pkt *pkt) } net_pkt_unref(pkt); + return 0; } @@ -368,6 +528,11 @@ static void eth_rx(struct device *iface) u32_t frame_length = 0; status_t status; unsigned int imask; + u16_t vlan_tag = NET_VLAN_TAG_UNSPEC; + +#if defined(CONFIG_NET_PKT_TIMESTAMP) + enet_ptp_time_data_t ptpTimeData; +#endif status = ENET_GetRxFrameSize(&context->enet_handle, (uint32_t *)&frame_length); @@ -458,13 +623,99 @@ static void eth_rx(struct device *iface) frame_length -= frag_len; } while (frame_length > 0); +#if defined(CONFIG_NET_VLAN) + { + struct net_eth_hdr *hdr = NET_ETH_HDR(pkt); + + if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) { + struct net_eth_vlan_hdr *hdr_vlan = + (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + + net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci)); + vlan_tag = net_pkt_vlan_tag(pkt); + +#if CONFIG_NET_TC_RX_COUNT > 1 + { + enum net_priority prio; 
+ + prio = net_vlan2priority( + net_pkt_vlan_priority(pkt)); + net_pkt_set_priority(pkt, prio); + } +#endif + } + } +#endif + +#if defined(CONFIG_NET_PKT_TIMESTAMP) + if (eth_get_ptp_data(get_iface(context, vlan_tag), pkt, + &ptpTimeData) && + (ENET_GetRxFrameTime(&context->enet_handle, + &ptpTimeData) == kStatus_Success)) { + struct net_ptp_time timestamp = { + .second = ptpTimeData.timeStamp.second, + .nanosecond = ptpTimeData.timeStamp.nanosecond, + }; + + net_pkt_set_timestamp(pkt, ×tamp); + } else { + /* Invalid value. */ + struct net_ptp_time timestamp = { + .second = UINT64_MAX, + .nanosecond = UINT32_MAX, + }; + + net_pkt_set_timestamp(pkt, ×tamp); + } +#endif /* CONFIG_NET_PKT_TIMESTAMP */ + irq_unlock(imask); - if (net_recv_data(context->iface, pkt) < 0) { + if (net_recv_data(get_iface(context, vlan_tag), pkt) < 0) { net_pkt_unref(pkt); } } +#if defined(CONFIG_NET_PKT_TIMESTAMP) +static inline void ts_register_tx_event(struct eth_context *context) +{ + struct net_pkt *pkt; + enet_ptp_time_data_t timeData; + + pkt = ts_tx_pkt[ts_tx_rd]; + if (pkt && pkt->ref > 0) { + if (eth_get_ptp_data(net_pkt_iface(pkt), pkt, &timeData)) { + int status; + + status = ENET_GetTxFrameTime(&context->enet_handle, + &timeData); + if (status == kStatus_Success) { + struct net_ptp_time timestamp = { + .second = timeData.timeStamp.second, + .nanosecond = timeData.timeStamp.nanosecond, + }; + + net_pkt_set_timestamp(pkt, ×tamp); + + net_if_add_tx_timestamp(pkt); + } + } + + net_pkt_unref(pkt); + } else { + if (IS_ENABLED(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG) && pkt) { + SYS_LOG_ERR("pkt %p already freed", pkt); + } + } + + ts_tx_pkt[ts_tx_rd++] = NULL; + + if (ts_tx_rd >= CONFIG_ETH_MCUX_TX_BUFFERS) { + ts_tx_rd = 0; + } +} +#endif + static void eth_callback(ENET_Type *base, enet_handle_t *handle, enet_event_t event, void *param) { @@ -476,6 +727,11 @@ static void eth_callback(ENET_Type *base, enet_handle_t *handle, eth_rx(iface); break; case kENET_TxEvent: +#if 
defined(CONFIG_NET_PKT_TIMESTAMP) + /* Register event */ + ts_register_tx_event(context); +#endif /* CONFIG_NET_PKT_TIMESTAMP */ + /* Free the TX buffer. */ k_sem_give(&context->tx_buf_sem); break; @@ -487,6 +743,8 @@ static void eth_callback(ENET_Type *base, enet_handle_t *handle, break; case kENET_TimeStampEvent: /* Time stamp event. */ + /* Reset periodic timer to default value. */ + ENET->ATPER = NSEC_PER_SEC; break; case kENET_TimeStampAvailEvent: /* Time stamp available event. */ @@ -523,6 +781,15 @@ static int eth_0_init(struct device *dev) .rxBufferAlign = rx_buffer[0], .txBufferAlign = tx_buffer[0], }; +#if defined(CONFIG_PTP_CLOCK_MCUX) + u8_t ptp_multicast[6] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x0E }; +#endif + +#if defined(CONFIG_NET_PKT_TIMESTAMP) + ts_tx_rd = 0; + ts_tx_wr = 0; + memset(ts_tx_pkt, 0, sizeof(ts_tx_pkt)); +#endif k_sem_init(&context->tx_buf_sem, CONFIG_ETH_MCUX_TX_BUFFERS, CONFIG_ETH_MCUX_TX_BUFFERS); @@ -545,6 +812,10 @@ static int eth_0_init(struct device *dev) generate_mac(context->mac_addr); #endif +#if defined(CONFIG_NET_VLAN) + enet_config.macSpecialConfig |= kENET_ControlVLANTagEnable; +#endif + ENET_Init(ENET, &context->enet_handle, &enet_config, @@ -552,6 +823,22 @@ static int eth_0_init(struct device *dev) context->mac_addr, sys_clock); +#if defined(CONFIG_PTP_CLOCK_MCUX) + ENET_AddMulticastGroup(ENET, ptp_multicast); + + context->ptp_config.ptpTsRxBuffNum = CONFIG_ETH_MCUX_PTP_RX_BUFFERS; + context->ptp_config.ptpTsTxBuffNum = CONFIG_ETH_MCUX_PTP_TX_BUFFERS; + context->ptp_config.rxPtpTsData = ptp_rx_buffer; + context->ptp_config.txPtpTsData = ptp_tx_buffer; + context->ptp_config.channel = kENET_PtpTimerChannel1; + context->ptp_config.ptp1588ClockSrc_Hz = + CONFIG_ETH_MCUX_PTP_CLOCK_SRC_HZ; + context->clk_ratio = 1.0; + + ENET_Ptp1588Configure(ENET, &context->enet_handle, + &context->ptp_config); +#endif + ENET_SetSMI(ENET, sys_clock, false); SYS_LOG_DBG("MAC %02x:%02x:%02x:%02x:%02x:%02x", @@ -588,6 +875,7 @@ static void 
eth_0_iface_init(struct net_if *iface) { struct device *dev = net_if_get_device(iface); struct eth_context *context = dev->driver_data; + int idx; #if defined(CONFIG_NET_IPV6) static struct net_if_mcast_monitor mon; @@ -598,14 +886,66 @@ static void eth_0_iface_init(struct net_if *iface) net_if_set_link_addr(iface, context->mac_addr, sizeof(context->mac_addr), NET_LINK_ETHERNET); - context->iface = iface; + + idx = net_if_get_by_iface(iface); + if (idx > ARRAY_SIZE(context->ifaces)) { + SYS_LOG_ERR("Invalid interface %p index %d", iface, idx); + } else { + context->ifaces[idx].iface = iface; + } + + ethernet_init(iface); +} + +#if defined(CONFIG_NET_VLAN) +static int vlan_setup(struct net_if *iface, u16_t tag, bool enable) +{ + struct device *dev = net_if_get_device(iface); + struct eth_context *context = dev->driver_data; + int idx; + + if (tag == NET_VLAN_TAG_UNSPEC) { + return -EBADF; + } + + idx = net_if_get_by_iface(iface); + + if (enable) { + /* Enabling VLAN, check if we already have this setup */ + if (context->ifaces[idx].vlan_tag == tag) { + return -EALREADY; + } + + context->ifaces[idx].iface = iface; + context->ifaces[idx].vlan_tag = tag; + } else { + context->ifaces[idx].iface = NULL; + context->ifaces[idx].vlan_tag = NET_VLAN_TAG_UNSPEC; + } + + return 0; } +#endif static const struct ethernet_api api_funcs_0 = { .iface_api.init = eth_0_iface_init, .iface_api.send = eth_tx, + +#if defined(CONFIG_NET_VLAN) + .vlan_setup = vlan_setup, +#endif }; +#if defined(CONFIG_PTP_CLOCK_MCUX) +static void eth_mcux_ptp_isr(void *p) +{ + struct device *dev = p; + struct eth_context *context = dev->driver_data; + + ENET_Ptp1588TimerIRQHandler(ENET, &context->enet_handle); +} +#endif + static void eth_mcux_rx_isr(void *p) { struct device *dev = p; @@ -650,10 +990,9 @@ static struct eth_context eth_0_context = { } }; -NET_DEVICE_INIT(eth_mcux_0, CONFIG_ETH_MCUX_0_NAME, - eth_0_init, ð_0_context, - NULL, CONFIG_ETH_INIT_PRIORITY, &api_funcs_0, - ETHERNET_L2, 
NET_L2_GET_CTX_TYPE(ETHERNET_L2), 1500); +ETH_NET_DEVICE_INIT(eth_mcux_0, CONFIG_ETH_MCUX_0_NAME, eth_0_init, + ð_0_context, NULL, CONFIG_ETH_INIT_PRIORITY, + &api_funcs_0, 1500); static void eth_0_config_func(void) { @@ -668,4 +1007,121 @@ static void eth_0_config_func(void) IRQ_CONNECT(IRQ_ETH_ERR_MISC, CONFIG_ETH_MCUX_0_IRQ_PRI, eth_mcux_error_isr, DEVICE_GET(eth_mcux_0), 0); irq_enable(IRQ_ETH_ERR_MISC); + +#if defined(CONFIG_PTP_CLOCK_MCUX) + IRQ_CONNECT(IRQ_ETH_IEEE1588_TMR, CONFIG_ETH_MCUX_0_IRQ_PRI, + eth_mcux_ptp_isr, DEVICE_GET(eth_mcux_0), 0); + irq_enable(IRQ_ETH_IEEE1588_TMR); +#endif } + +#if defined(CONFIG_PTP_CLOCK_MCUX) +static int ptp_clock_mcux_set(struct ptp_clock *clk, struct net_ptp_time *tm) +{ + struct eth_context *context = clk->dev->driver_data; + enet_ptp_time_t enet_time; + + enet_time.second = tm->second; + enet_time.nanosecond = tm->nanosecond; + + ENET_Ptp1588SetTimer(ENET, &context->enet_handle, &enet_time); + return 0; +} + +static int ptp_clock_mcux_get(struct ptp_clock *clk, struct net_ptp_time *tm) +{ + struct eth_context *context = clk->dev->driver_data; + enet_ptp_time_t enet_time; + + ENET_Ptp1588GetTimer(ENET, &context->enet_handle, &enet_time); + + tm->second = enet_time.second; + tm->nanosecond = enet_time.nanosecond; + return 0; +} + +static int ptp_clock_mcux_adjust(struct ptp_clock *clk, int increment) +{ + int key, ret; + + ARG_UNUSED(clk); + + if ((increment <= -NSEC_PER_SEC) || (increment >= NSEC_PER_SEC)) { + ret = -EINVAL; + } else { + key = irq_lock(); + if (ENET->ATPER != NSEC_PER_SEC) { + ret = -EBUSY; + } else { + /* Seconds counter is handled by software. Change the + * period of one software second to adjust the clock. 
+ */ + ENET->ATPER = NSEC_PER_SEC - increment; + ret = 0; + } + irq_unlock(key); + } + + return ret; +} + +static int ptp_clock_mcux_rate_adjust(struct ptp_clock *clk, float ratio) +{ + const int hw_inc = NSEC_PER_SEC / CONFIG_ETH_MCUX_PTP_CLOCK_SRC_HZ; + struct eth_context *context = clk->dev->driver_data; + int corr; + s32_t mul; + float val; + + /* No change needed. */ + if (ratio == 1.0) { + return 0; + } + + ratio *= context->clk_ratio; + + /* Limit possible ratio. */ + if ((ratio > 1.0 + 1.0/(2 * hw_inc)) || + (ratio < 1.0 - 1.0/(2 * hw_inc))) { + return -EINVAL; + } + + /* Save new ratio. */ + context->clk_ratio = ratio; + + if (ratio < 1.0) { + corr = hw_inc - 1; + val = 1.0 / (hw_inc * (1.0 - ratio)); + } else if (ratio > 1.0) { + corr = hw_inc + 1; + val = 1.0 / (hw_inc * (ratio-1.0)); + } else { + val = 0; + corr = hw_inc; + } + + if (val >= INT32_MAX) { + /* Value is too high. + * It is not possible to adjust the rate of the clock. + */ + mul = 0; + } else { + mul = val; + } + + + ENET_Ptp1588AdjustTimer(ENET, corr, mul); + + return 0; +} + +static const struct ptp_clock_driver_api api = { + .set = ptp_clock_mcux_set, + .get = ptp_clock_mcux_get, + .adjust = ptp_clock_mcux_adjust, + .rate_adjust = ptp_clock_mcux_rate_adjust, +}; + +PTP_CLOCK_DEVICE_INIT(eth_mcux_0, api); + +#endif /* CONFIG_PTP_CLOCK_MCUX */ diff --git a/drivers/ethernet/eth_native_posix.c b/drivers/ethernet/eth_native_posix.c index a239dd2d90fce..f1b53a4753b89 100644 --- a/drivers/ethernet/eth_native_posix.c +++ b/drivers/ethernet/eth_native_posix.c @@ -28,26 +28,45 @@ #include #include +#if defined(CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK) +#include +#include +#endif + #include "eth_native_posix_priv.h" +#include "eth_stats.h" #if defined(CONFIG_NET_L2_ETHERNET) -#define _ETH_L2_LAYER ETHERNET_L2 -#define _ETH_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(ETHERNET_L2) #define _ETH_MTU 1500 #endif #define NET_BUF_TIMEOUT MSEC(10) +#if defined(CONFIG_NET_VLAN) +#define ETH_HDR_LEN sizeof(struct 
net_eth_vlan_hdr) +#else +#define ETH_HDR_LEN sizeof(struct net_eth_hdr) +#endif + +struct ifaces { + struct net_if *iface; + u16_t vlan_tag; +}; + struct eth_context { - u8_t recv[_ETH_MTU + sizeof(struct net_eth_hdr)]; - u8_t send[_ETH_MTU + sizeof(struct net_eth_hdr)]; + struct ifaces ifaces[NET_VLAN_MAX_COUNT]; + u8_t recv[_ETH_MTU + ETH_HDR_LEN]; + u8_t send[_ETH_MTU + ETH_HDR_LEN]; u8_t mac_addr[6]; struct net_linkaddr ll_addr; - struct net_if *iface; const char *if_name; int dev_fd; bool init_done; bool status; + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + struct net_stats_eth stats; +#endif }; NET_STACK_DEFINE(RX_ZETH, eth_rx_stack, @@ -63,6 +82,107 @@ static struct eth_context *get_context(struct net_if *iface) return net_if_get_device(iface)->driver_data; } +#if defined(CONFIG_NET_GPTP) +static bool need_timestamping(struct gptp_hdr *hdr) +{ + switch (hdr->message_type) { + case GPTP_SYNC_MESSAGE: + case GPTP_PATH_DELAY_RESP_MESSAGE: + return true; + default: + return false; + } +} + +static struct gptp_hdr *check_gptp_msg(struct net_if *iface, + struct net_pkt *pkt) +{ + struct ethernet_context *eth_ctx; + struct gptp_hdr *gptp_hdr; + u8_t *msg_start; + + if (net_pkt_ll_reserve(pkt)) { + msg_start = net_pkt_ll(pkt); + } else { + msg_start = net_pkt_ip_data(pkt); + } + +#if defined(CONFIG_NET_VLAN) + eth_ctx = net_if_l2_data(iface); + if (net_eth_is_vlan_enabled(eth_ctx, iface)) { + struct net_eth_vlan_hdr *hdr_vlan; + + hdr_vlan = (struct net_eth_vlan_hdr *)msg_start; + if (ntohs(hdr_vlan->type) != NET_ETH_PTYPE_PTP) { + return NULL; + } + + gptp_hdr = (struct gptp_hdr *)(msg_start + + sizeof(struct net_eth_vlan_hdr)); + } else +#endif + { + struct net_eth_hdr *hdr; + + hdr = (struct net_eth_hdr *)msg_start; + if (ntohs(hdr->type) != NET_ETH_PTYPE_PTP) { + return NULL; + } + + gptp_hdr = (struct gptp_hdr *)(msg_start + + sizeof(struct net_eth_hdr)); + } + + return gptp_hdr; +} + +static void update_pkt_priority(struct gptp_hdr *hdr, struct net_pkt 
*pkt) +{ + switch (hdr->message_type) { + case GPTP_SYNC_MESSAGE: + case GPTP_DELAY_REQ_MESSAGE: + case GPTP_PATH_DELAY_REQ_MESSAGE: + case GPTP_PATH_DELAY_RESP_MESSAGE: + net_pkt_set_priority(pkt, NET_PRIORITY_CA); + break; + default: + net_pkt_set_priority(pkt, NET_PRIORITY_IC); + break; + } +} + +static void update_gptp(struct net_if *iface, struct net_pkt *pkt, + bool send) +{ + struct net_ptp_time timestamp; + struct gptp_hdr *hdr; + int ret; + + ret = eth_clock_gettime(×tamp); + if (ret < 0) { + return; + } + + net_pkt_set_timestamp(pkt, ×tamp); + + hdr = check_gptp_msg(iface, pkt); + if (!hdr) { + return; + } + + if (send) { + ret = need_timestamping(hdr); + if (ret) { + net_if_add_tx_timestamp(pkt); + } + } else { + update_pkt_priority(hdr, pkt); + } +} +#else +#define update_gptp(iface, pkt, send) +#endif /* CONFIG_NET_GPTP */ + static int eth_send(struct net_if *iface, struct net_pkt *pkt) { struct eth_context *ctx = get_context(iface); @@ -82,12 +202,28 @@ static int eth_send(struct net_if *iface, struct net_pkt *pkt) frag = frag->frags; } - net_pkt_unref(pkt); + eth_stats_update_bytes_tx(iface, count); + eth_stats_update_pkts_tx(iface); + + if (IS_ENABLED(CONFIG_NET_STATISTICS_ETHERNET)) { + if (net_eth_is_addr_broadcast( + &((struct net_eth_hdr *)NET_ETH_HDR(pkt))->dst)) { + eth_stats_update_broadcast_tx(iface); + } else if (net_eth_is_addr_multicast( + &((struct net_eth_hdr *) + NET_ETH_HDR(pkt))->dst)) { + eth_stats_update_multicast_tx(iface); + } + } + + update_gptp(iface, pkt, true); SYS_LOG_DBG("Send pkt %p len %d", pkt, count); eth_write_data(ctx->dev_fd, ctx->send, count); + net_pkt_unref(pkt); + return 0; } @@ -105,10 +241,33 @@ static struct net_linkaddr *eth_get_mac(struct eth_context *ctx) return &ctx->ll_addr; } +static struct net_if *get_iface(struct eth_context *ctx, + u16_t vlan_tag) +{ +#if defined(CONFIG_NET_VLAN) + int i; + + for (i = 0; i < ARRAY_SIZE(ctx->ifaces); i++) { + if (ctx->ifaces[i].vlan_tag == vlan_tag) { + return 
ctx->ifaces[i].iface; + } + } + + return NULL; +#else + ARG_UNUSED(vlan_tag); + + return ctx->ifaces[0].iface; +#endif +} + static int read_data(struct eth_context *ctx, int fd) { + u16_t vlan_tag = NET_VLAN_TAG_UNSPEC; + struct net_if *iface; struct net_pkt *pkt; struct net_buf *frag; + u32_t pkt_len; int ret; ret = eth_read_data(fd, ctx->recv, sizeof(ctx->recv)); @@ -138,9 +297,51 @@ static int read_data(struct eth_context *ctx, int fd) count += frag->len; } while (ret > 0); - SYS_LOG_DBG("Recv pkt %p len %d", pkt, net_pkt_get_len(pkt)); +#if defined(CONFIG_NET_VLAN) + { + struct net_eth_hdr *hdr = NET_ETH_HDR(pkt); + + if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) { + struct net_eth_vlan_hdr *hdr_vlan = + (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + + net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci)); + vlan_tag = net_pkt_vlan_tag(pkt); + } + +#if CONFIG_NET_TC_RX_COUNT > 1 + { + enum net_priority prio; + + prio = net_vlan2priority(net_pkt_vlan_priority(pkt)); + net_pkt_set_priority(pkt, prio); + } +#endif + } +#endif + + iface = get_iface(ctx, vlan_tag); + pkt_len = net_pkt_get_len(pkt); + + eth_stats_update_bytes_rx(iface, pkt_len); + eth_stats_update_pkts_rx(iface); + + if (IS_ENABLED(CONFIG_NET_STATISTICS_ETHERNET)) { + if (net_eth_is_addr_broadcast( + &((struct net_eth_hdr *)NET_ETH_HDR(pkt))->dst)) { + eth_stats_update_broadcast_rx(iface); + } else if (net_eth_is_addr_multicast( + &((struct net_eth_hdr *) + NET_ETH_HDR(pkt))->dst)) { + eth_stats_update_multicast_rx(iface); + } + } + + SYS_LOG_DBG("Recv pkt %p len %d", pkt, pkt_len); + + update_gptp(iface, pkt, false); - if (net_recv_data(ctx->iface, pkt) < 0) { + if (net_recv_data(iface, pkt) < 0) { net_pkt_unref(pkt); } @@ -154,7 +355,7 @@ static void eth_rx(struct eth_context *ctx) SYS_LOG_DBG("Starting ZETH RX thread"); while (1) { - if (net_if_is_up(ctx->iface)) { + if (net_if_is_up(ctx->ifaces[0].iface)) { ret = eth_wait_data(ctx->dev_fd); if (!ret) { read_data(ctx, ctx->dev_fd); @@ -178,13 +379,15 
@@ static void eth_iface_init(struct net_if *iface) { struct eth_context *ctx = net_if_get_device(iface)->driver_data; struct net_linkaddr *ll_addr = eth_get_mac(ctx); + int idx; + + ethernet_init(iface); if (ctx->init_done) { return; } ctx->init_done = true; - ctx->iface = iface; #if defined(CONFIG_ETH_NATIVE_POSIX_RANDOM_MAC) /* 00-00-5E-00-53-xx Documentation RFC 7042 */ @@ -216,6 +419,13 @@ static void eth_iface_init(struct net_if *iface) ctx->if_name = CONFIG_ETH_NATIVE_POSIX_DRV_NAME; + idx = net_if_get_by_iface(iface); + if (idx > ARRAY_SIZE(ctx->ifaces)) { + SYS_LOG_ERR("Invalid interface %p index %d", iface, idx); + } else { + ctx->ifaces[idx].iface = iface; + } + ctx->dev_fd = eth_iface_create(ctx->if_name, false); if (ctx->dev_fd < 0) { SYS_LOG_ERR("Cannot create %s (%d)", ctx->if_name, @@ -228,12 +438,106 @@ static void eth_iface_init(struct net_if *iface) } } +#if defined(CONFIG_NET_VLAN) +static int vlan_setup(struct net_if *iface, u16_t tag, bool enable) +{ + struct device *dev = net_if_get_device(iface); + struct eth_context *context = dev->driver_data; + int idx; + + if (tag == NET_VLAN_TAG_UNSPEC) { + return -EBADF; + } + + idx = net_if_get_by_iface(iface); + + if (enable) { + /* Enabling VLAN, check if we already have this setup */ + if (context->ifaces[idx].vlan_tag == tag) { + return -EALREADY; + } + + context->ifaces[idx].iface = iface; + context->ifaces[idx].vlan_tag = tag; + } else { + context->ifaces[idx].iface = NULL; + context->ifaces[idx].vlan_tag = NET_VLAN_TAG_UNSPEC; + } + + return 0; +} +#endif + static const struct ethernet_api eth_if_api = { .iface_api.init = eth_iface_init, .iface_api.send = eth_send, + +#if defined(CONFIG_NET_VLAN) + .vlan_setup = vlan_setup, +#endif +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + .stats = ð_context_data.stats, +#endif }; -NET_DEVICE_INIT(eth_native_posix, CONFIG_ETH_NATIVE_POSIX_DRV_NAME, - eth_init, ð_context_data, NULL, - CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, ð_if_api, - _ETH_L2_LAYER, 
_ETH_L2_CTX_TYPE, _ETH_MTU); +ETH_NET_DEVICE_INIT(eth_native_posix, CONFIG_ETH_NATIVE_POSIX_DRV_NAME, + eth_init, ð_context_data, NULL, + CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, ð_if_api, + _ETH_MTU); + +#if defined(CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK) +static int ptp_clock_set_native_posix(struct ptp_clock *clk, + struct net_ptp_time *tm) +{ + ARG_UNUSED(clk); + ARG_UNUSED(tm); + + /* We cannot set the host device time so this function + * does nothing. + */ + + return 0; +} + +static int ptp_clock_get_native_posix(struct ptp_clock *clk, + struct net_ptp_time *tm) +{ + return eth_clock_gettime(tm); +} + +static int ptp_clock_adjust_native_posix(struct ptp_clock *clk, + int increment) +{ + ARG_UNUSED(clk); + ARG_UNUSED(increment); + + /* We cannot adjust the host device time so this function + * does nothing. + */ + + return 0; +} + +static int ptp_clock_rate_adjust_native_posix(struct ptp_clock *clk, + float ratio) +{ + ARG_UNUSED(clk); + ARG_UNUSED(ratio); + + /* We cannot adjust the host device time so this function + * does nothing. 
+ */ + + return 0; +} + +static const struct ptp_clock_driver_api api = { + .set = ptp_clock_set_native_posix, + .get = ptp_clock_get_native_posix, + .adjust = ptp_clock_adjust_native_posix, + .rate_adjust = ptp_clock_rate_adjust_native_posix, +}; + +PTP_CLOCK_DEVICE_INIT(eth_native_posix, api); + +#endif /* CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK */ diff --git a/drivers/ethernet/eth_native_posix_adapt.c b/drivers/ethernet/eth_native_posix_adapt.c index a7e2039d4eeb9..e9b7661fbd085 100644 --- a/drivers/ethernet/eth_native_posix_adapt.c +++ b/drivers/ethernet/eth_native_posix_adapt.c @@ -23,6 +23,7 @@ #include #include #include +#include #ifdef __linux #include @@ -38,6 +39,10 @@ #include #include +#if defined(CONFIG_NET_GPTP) +#include +#endif + #include "eth_native_posix_priv.h" /* Note that we cannot create the TUN/TAP device from the setup script @@ -138,3 +143,21 @@ ssize_t eth_write_data(int fd, void *buf, size_t buf_len) { return write(fd, buf, buf_len); } + +#if defined(CONFIG_NET_GPTP) +int eth_clock_gettime(struct net_ptp_time *time) +{ + struct timespec tp; + int ret; + + ret = clock_gettime(CLOCK_MONOTONIC_RAW, &tp); + if (ret < 0) { + return -errno; + } + + time->second = tp.tv_sec; + time->nanosecond = tp.tv_nsec; + + return 0; +} +#endif /* CONFIG_NET_GPTP */ diff --git a/drivers/ethernet/eth_native_posix_priv.h b/drivers/ethernet/eth_native_posix_priv.h index ffcbd62d2d623..17f0ecb926673 100644 --- a/drivers/ethernet/eth_native_posix_priv.h +++ b/drivers/ethernet/eth_native_posix_priv.h @@ -18,4 +18,8 @@ int eth_wait_data(int fd); ssize_t eth_read_data(int fd, void *buf, size_t buf_len); ssize_t eth_write_data(int fd, void *buf, size_t buf_len); +#if defined(CONFIG_NET_GPTP) +int eth_clock_gettime(struct net_ptp_time *time); +#endif + #endif /* _ETH_NATIVE_POSIX_PRIV_H */ diff --git a/drivers/ethernet/eth_sam_gmac.c b/drivers/ethernet/eth_sam_gmac.c index c4f2971c79e2a..3cecc2c8dd27d 100644 --- a/drivers/ethernet/eth_sam_gmac.c +++ 
b/drivers/ethernet/eth_sam_gmac.c @@ -580,10 +580,31 @@ static struct net_pkt *frame_get(struct gmac_queue *queue) return rx_frame; } +static struct net_if *get_iface(struct eth_sam_dev_data *ctx, + u16_t vlan_tag) +{ +#if defined(CONFIG_NET_VLAN) + int i; + + for (i = 0; i < ARRAY_SIZE(ctx->ifaces); i++) { + if (ctx->ifaces[i].vlan_tag == vlan_tag) { + return ctx->ifaces[i].iface; + } + } + + return NULL; +#else + ARG_UNUSED(vlan_tag); + + return ctx->ifaces[0].iface; +#endif +} + static void eth_rx(struct gmac_queue *queue) { struct eth_sam_dev_data *dev_data = CONTAINER_OF(queue, struct eth_sam_dev_data, queue_list); + u16_t vlan_tag = NET_VLAN_TAG_UNSPEC; struct net_pkt *rx_frame; /* More than one frame could have been received by GMAC, get all @@ -593,7 +614,36 @@ static void eth_rx(struct gmac_queue *queue) while (rx_frame) { SYS_LOG_DBG("ETH rx"); - if (net_recv_data(dev_data->iface, rx_frame) < 0) { +#if defined(CONFIG_NET_VLAN) + /* FIXME: Instead of this, use the GMAC register to get + * the used VLAN tag. 
+ */ + { + struct net_eth_hdr *hdr = NET_ETH_HDR(rx_frame); + + if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) { + struct net_eth_vlan_hdr *hdr_vlan = + (struct net_eth_vlan_hdr *) + NET_ETH_HDR(rx_frame); + + net_pkt_set_vlan_tci(rx_frame, + ntohs(hdr_vlan->vlan.tci)); + vlan_tag = net_pkt_vlan_tag(rx_frame); + +#if CONFIG_NET_TC_RX_COUNT > 1 + { + enum net_priority prio; + + prio = net_vlan2priority( + net_pkt_vlan_priority(rx_frame)); + net_pkt_set_priority(rx_frame, prio); + } +#endif + } + } +#endif + if (net_recv_data(get_iface(dev_data, vlan_tag), + rx_frame) < 0) { net_pkt_unref(rx_frame); } @@ -776,11 +826,25 @@ static void eth0_iface_init(struct net_if *iface) struct device *const dev = net_if_get_device(iface); struct eth_sam_dev_data *const dev_data = DEV_DATA(dev); const struct eth_sam_dev_cfg *const cfg = DEV_CFG(dev); + static bool init_done; u32_t gmac_ncfgr_val; u32_t link_status; int result; + int idx; + + idx = net_if_get_by_iface(iface); + if (idx > ARRAY_SIZE(dev_data->ifaces)) { + SYS_LOG_ERR("Invalid interface %p index %d", iface, idx); + } else { + dev_data->ifaces[idx].iface = iface; + } + + ethernet_init(iface); - dev_data->iface = iface; + /* The rest of initialization should only be done once */ + if (init_done) { + return; + } /* Initialize GMAC driver, maximum frame length is 1518 bytes */ gmac_ncfgr_val = @@ -837,11 +901,47 @@ static void eth0_iface_init(struct net_if *iface) /* Set up link parameters */ link_configure(cfg->regs, link_status); + + init_done = true; } +#if defined(CONFIG_NET_VLAN) +static int vlan_setup(struct net_if *iface, u16_t tag, bool enable) +{ + struct device *dev = net_if_get_device(iface); + struct eth_sam_dev_data *const context = DEV_DATA(dev); + int idx; + + if (tag == NET_VLAN_TAG_UNSPEC) { + return -EBADF; + } + + idx = net_if_get_by_iface(iface); + + if (enable) { + /* Enabling VLAN, check if we already have this setup */ + if (context->ifaces[idx].vlan_tag == tag) { + return -EALREADY; + } + + 
context->ifaces[idx].iface = iface; + context->ifaces[idx].vlan_tag = tag; + } else { + context->ifaces[idx].iface = NULL; + context->ifaces[idx].vlan_tag = NET_VLAN_TAG_UNSPEC; + } + + return 0; +} +#endif + static const struct ethernet_api eth0_api = { .iface_api.init = eth0_iface_init, .iface_api.send = eth_tx, + +#if defined(CONFIG_NET_VLAN) + .vlan_setup = vlan_setup, +#endif }; static struct device DEVICE_NAME_GET(eth0_sam_gmac); @@ -917,6 +1017,6 @@ static struct eth_sam_dev_data eth0_data = { }, }; -NET_DEVICE_INIT(eth0_sam_gmac, CONFIG_ETH_SAM_GMAC_NAME, eth_initialize, - ð0_data, ð0_config, CONFIG_ETH_INIT_PRIORITY, ð0_api, - ETHERNET_L2, NET_L2_GET_CTX_TYPE(ETHERNET_L2), GMAC_MTU); +ETH_NET_DEVICE_INIT(eth0_sam_gmac, CONFIG_ETH_SAM_GMAC_NAME, eth_initialize, + ð0_data, ð0_config, CONFIG_ETH_INIT_PRIORITY, + ð0_api, GMAC_MTU); diff --git a/drivers/ethernet/eth_sam_gmac_priv.h b/drivers/ethernet/eth_sam_gmac_priv.h index 7db94088f2181..c60df9d3e6281 100644 --- a/drivers/ethernet/eth_sam_gmac_priv.h +++ b/drivers/ethernet/eth_sam_gmac_priv.h @@ -175,9 +175,14 @@ struct eth_sam_dev_cfg { struct phy_sam_gmac_dev phy; }; +struct ifaces { + struct net_if *iface; + u16_t vlan_tag; +}; + /* Device run time data */ struct eth_sam_dev_data { - struct net_if *iface; + struct ifaces ifaces[NET_VLAN_MAX_COUNT]; u8_t mac_addr[6]; struct gmac_queue queue_list[GMAC_QUEUE_NO]; }; diff --git a/drivers/ethernet/eth_stm32_hal.c b/drivers/ethernet/eth_stm32_hal.c index 32a2459b07cc5..dfa879b5adc5b 100644 --- a/drivers/ethernet/eth_stm32_hal.c +++ b/drivers/ethernet/eth_stm32_hal.c @@ -367,6 +367,8 @@ static void eth0_iface_init(struct net_if *iface) net_if_set_link_addr(iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); + + ethernet_init(iface); } static const struct ethernet_api eth0_api = { diff --git a/drivers/net/slip.c b/drivers/net/slip.c index db746acb3a021..1cf3460e85111 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -26,6 +26,7 @@ 
#include #include #include +#include #include #include @@ -239,8 +240,27 @@ static struct net_pkt *slip_poll_handler(struct slip_context *slip) return NULL; } +static struct net_if *get_iface(struct slip_context *context, + u16_t vlan_tag) +{ +#if defined(CONFIG_NET_VLAN) + struct ethernet_context *ctx = net_if_l2_data(context->iface); + struct net_if *iface; + + iface = net_eth_get_vlan_iface(ctx, vlan_tag); + if (iface) { + return iface; + } +#else + ARG_UNUSED(vlan_tag); +#endif + + return context->iface; +} + static void process_msg(struct slip_context *slip) { + u16_t vlan_tag = NET_VLAN_TAG_UNSPEC; struct net_pkt *pkt; pkt = slip_poll_handler(slip); @@ -248,7 +268,21 @@ static void process_msg(struct slip_context *slip) return; } - if (net_recv_data(slip->iface, pkt) < 0) { +#if defined(CONFIG_NET_VLAN) + { + struct net_eth_hdr *hdr = NET_ETH_HDR(pkt); + + if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) { + struct net_eth_vlan_hdr *hdr_vlan = + (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + + net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci)); + vlan_tag = net_pkt_vlan_tag(pkt); + } + } +#endif + + if (net_recv_data(get_iface(slip, vlan_tag), pkt) < 0) { net_pkt_unref(pkt); } @@ -441,8 +475,15 @@ static inline struct net_linkaddr *slip_get_mac(struct slip_context *slip) static void slip_iface_init(struct net_if *iface) { struct slip_context *slip = net_if_get_device(iface)->driver_data; - struct net_linkaddr *ll_addr = slip_get_mac(slip); + struct net_linkaddr *ll_addr; + + ethernet_init(iface); + + if (slip->init_done) { + return; + } + ll_addr = slip_get_mac(slip); slip->init_done = true; slip->iface = iface; @@ -476,6 +517,11 @@ static const struct ethernet_api slip_if_api = { #define _SLIP_L2_LAYER ETHERNET_L2 #define _SLIP_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(ETHERNET_L2) #define _SLIP_MTU 1500 + +ETH_NET_DEVICE_INIT(slip, CONFIG_SLIP_DRV_NAME, slip_init, &slip_context_data, + NULL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &slip_if_api, + _SLIP_MTU); + #else static 
const struct net_if_api slip_if_api = { @@ -486,8 +532,8 @@ static const struct net_if_api slip_if_api = { #define _SLIP_L2_LAYER DUMMY_L2 #define _SLIP_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(DUMMY_L2) #define _SLIP_MTU 576 -#endif NET_DEVICE_INIT(slip, CONFIG_SLIP_DRV_NAME, slip_init, &slip_context_data, NULL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &slip_if_api, _SLIP_L2_LAYER, _SLIP_L2_CTX_TYPE, _SLIP_MTU); +#endif diff --git a/drivers/ptp_clock/CMakeLists.txt b/drivers/ptp_clock/CMakeLists.txt new file mode 100644 index 0000000000000..30b293b956c0a --- /dev/null +++ b/drivers/ptp_clock/CMakeLists.txt @@ -0,0 +1 @@ +zephyr_sources_ifdef(CONFIG_NET_GPTP ptp_clock.c) diff --git a/drivers/ptp_clock/Kconfig b/drivers/ptp_clock/Kconfig new file mode 100644 index 0000000000000..e606735874065 --- /dev/null +++ b/drivers/ptp_clock/Kconfig @@ -0,0 +1,11 @@ +# +# Copyright (c) 2018 Intel Corporation. +# +# SPDX-License-Identifier: Apache-2.0 +# + +menuconfig PTP_CLOCK + bool "Precision Time Protocol Clock driver support" + default n + help + Enable options for Precision Time Protocol Clock drivers. diff --git a/drivers/ptp_clock/ptp_clock.c b/drivers/ptp_clock/ptp_clock.c new file mode 100644 index 0000000000000..12ef7772fb031 --- /dev/null +++ b/drivers/ptp_clock/ptp_clock.c @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +/* ptp_clock dedicated section limiters */ +extern struct ptp_clock __ptp_clock_start[]; +extern struct ptp_clock __ptp_clock_end[]; + +struct ptp_clock *ptp_clock_lookup_by_dev(struct device *dev) +{ + struct ptp_clock *clk; + + for (clk = __ptp_clock_start; clk != __ptp_clock_end; clk++) { + if (clk->dev == dev) { + return clk; + } + } + + return NULL; +} diff --git a/ext/hal/nxp/mcux/drivers/CMakeLists.txt b/ext/hal/nxp/mcux/drivers/CMakeLists.txt index f61b91e4ef098..5dfd8651f9448 100644 --- a/ext/hal/nxp/mcux/drivers/CMakeLists.txt +++ b/ext/hal/nxp/mcux/drivers/CMakeLists.txt @@ -1,5 +1,9 @@ zephyr_include_directories(.) +zephyr_library_compile_definitions_ifdef( + CONFIG_PTP_CLOCK_MCUX ENET_ENHANCEDBUFFERDESCRIPTOR_MODE +) + zephyr_sources_ifdef(CONFIG_ADC_MCUX_ADC16 fsl_adc16.c) zephyr_sources_ifdef(CONFIG_ETH_MCUX fsl_enet.c) zephyr_sources_ifdef(CONFIG_GPIO_MCUX_IGPIO fsl_igpio.c) diff --git a/ext/hal/nxp/mcux/drivers/fsl_enet.c b/ext/hal/nxp/mcux/drivers/fsl_enet.c index 2e7e002425c8f..adb1509711b12 100644 --- a/ext/hal/nxp/mcux/drivers/fsl_enet.c +++ b/ext/hal/nxp/mcux/drivers/fsl_enet.c @@ -2154,7 +2154,7 @@ static bool ENET_Ptp1588ParseFrame(const uint8_t *data, enet_ptp_time_data_t *pt switch (ENET_HTONS(ptpType)) { /* Ethernet layer 2. */ case ENET_ETHERNETL2: - if (*(uint8_t *)(buffer + ENET_PTP1588_ETHL2_MSGTYPE_OFFSET) <= kENET_PtpEventMsgType) + if ((*(uint8_t *)(buffer + ENET_PTP1588_ETHL2_MSGTYPE_OFFSET) & 0xf) <= kENET_PtpEventMsgType) { isPtpMsg = true; if (!isFastEnabled) @@ -2522,6 +2522,9 @@ static status_t ENET_StoreTxFrameTime(ENET_Type *base, enet_handle_t *handle, ui ptpTimeData.timeStamp.second = handle->msTimerSecond - 1; } + /* Save transmit time stamp nanosecond. */ + ptpTimeData.timeStamp.nanosecond = curBuffDescrip->timestamp; + /* Enable the interrupt. 
*/ EnableGlobalIRQ(primask); diff --git a/include/linker/common-rom.ld b/include/linker/common-rom.ld index 43e8a2eec0874..0114d944011c9 100644 --- a/include/linker/common-rom.ld +++ b/include/linker/common-rom.ld @@ -50,3 +50,13 @@ KEEP(*(SORT_BY_NAME(".net_l2.init*"))) __net_l2_end = .; } GROUP_LINK_IN(ROMABLE_REGION) + +#if defined(CONFIG_NET_GPTP) + SECTION_PROLOGUE(ptp_clock, (OPTIONAL), ) + { + __ptp_clock_start = .; + *(".ptp_clock.*") + KEEP(*(SORT_BY_NAME(".ptp_clock.*"))) + __ptp_clock_end = .; + } GROUP_LINK_IN(ROMABLE_REGION) +#endif diff --git a/include/net/ethernet.h b/include/net/ethernet.h index 2fa19d45501d1..ebd093b3b1993 100644 --- a/include/net/ethernet.h +++ b/include/net/ethernet.h @@ -15,10 +15,13 @@ #include #include +#include #include #include #include +#include +#include #ifdef __cplusplus extern "C" { @@ -36,6 +39,8 @@ extern "C" { #define NET_ETH_PTYPE_ARP 0x0806 #define NET_ETH_PTYPE_IP 0x0800 #define NET_ETH_PTYPE_IPV6 0x86dd +#define NET_ETH_PTYPE_VLAN 0x8100 +#define NET_ETH_PTYPE_PTP 0x88f7 #define NET_ETH_MINIMAL_FRAME_SIZE 60 @@ -54,8 +59,23 @@ struct ethernet_api { */ struct net_if_api iface_api; +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + /** Collect optional ethernet specific statistics. This pointer + * should be set by driver if statistics needs to be collected + * for that driver. + */ + struct net_stats_eth *stats; +#endif + /** Get the device capabilities */ enum eth_hw_caps (*get_capabilities)(struct device *dev); + + /** The IP stack will call this function when a VLAN tag is enabled + * or disabled. If enable is set to true, then the VLAN tag was added, + * if it is false then the tag was removed. The driver can utilize + * this information if needed. 
+ */ + int (*vlan_setup)(struct net_if *iface, u16_t tag, bool enable); }; struct net_eth_addr { @@ -68,6 +88,70 @@ struct net_eth_hdr { u16_t type; } __packed; +struct ethernet_vlan { + /** Network interface that has VLAN enabled */ + struct net_if *iface; + + /** VLAN tag */ + u16_t tag; +}; + +#if defined(CONFIG_NET_VLAN_COUNT) +#define NET_VLAN_MAX_COUNT CONFIG_NET_VLAN_COUNT +#else +/* Even though there is no VLAN support, the minimum count must be set to 1. + */ +#define NET_VLAN_MAX_COUNT 1 +#endif + +/** Ethernet L2 context that is needed for VLAN */ +struct ethernet_context { +#if defined(CONFIG_NET_VLAN) + struct ethernet_vlan vlan[NET_VLAN_MAX_COUNT]; + + /** Array that will help when checking if VLAN is enabled for + * some specific network interface. Requires that VLAN count + * NET_VLAN_MAX_COUNT is not smaller than the actual number + * of network interfaces. + */ + ATOMIC_DEFINE(interfaces, NET_VLAN_MAX_COUNT); + + /** Flag that tells how many VLAN tags are enabled for this + * context. The same information can be dug from the vlan array but + * this saves some time in RX path. + */ + s8_t vlan_enabled; +#endif + + /** Is this context already initialized */ + bool is_init; +}; + +#define ETHERNET_L2_CTX_TYPE struct ethernet_context + +/** + * @brief Initialize Ethernet L2 stack for a given interface + * + * @param iface A valid pointer to a network interface + */ +void ethernet_init(struct net_if *iface); + +#if defined(CONFIG_NET_VLAN) +/* Separate header for VLAN as some device interfaces might not + * support VLAN. 
+ */ +struct net_eth_vlan_hdr { + struct net_eth_addr dst; + struct net_eth_addr src; + struct { + u16_t tpid; /* tag protocol id */ + u16_t tci; /* tag control info */ + } vlan; + u16_t type; +} __packed; + +#endif /* CONFIG_NET_VLAN */ + static inline bool net_eth_is_addr_broadcast(struct net_eth_addr *addr) { if (addr->addr[0] == 0xff && @@ -102,6 +186,22 @@ static inline bool net_eth_is_addr_multicast(struct net_eth_addr *addr) return false; } +static inline bool net_eth_is_addr_lldp_multicast(struct net_eth_addr *addr) +{ +#if defined(CONFIG_NET_GPTP) + if (addr->addr[0] == 0x01 && + addr->addr[1] == 0x80 && + addr->addr[2] == 0xc2 && + addr->addr[3] == 0x00 && + addr->addr[4] == 0x00 && + addr->addr[5] == 0x0e) { + return true; + } +#endif + + return false; +} + const struct net_eth_addr *net_eth_broadcast_addr(void); /** @@ -133,6 +233,109 @@ enum eth_hw_caps net_eth_get_hw_capabilities(struct net_if *iface) return eth->get_capabilities(net_if_get_device(iface)); } +#if defined(CONFIG_NET_VLAN) +/** + * @brief Add VLAN tag to the interface. + * + * @param iface Interface to use. + * @param tag VLAN tag to add + * + * @return 0 if ok, <0 if error + */ +int net_eth_vlan_enable(struct net_if *iface, u16_t tag); + +/** + * @brief Remove VLAN tag from the interface. + * + * @param iface Interface to use. + * @param tag VLAN tag to remove + * + * @return 0 if ok, <0 if error + */ +int net_eth_vlan_disable(struct net_if *iface, u16_t tag); + +/** + * @brief Return VLAN tag specified to network interface + * + * @param iface Network interface. + * + * @return VLAN tag for this interface or NET_VLAN_TAG_UNSPEC if VLAN + * is not configured for that interface. + */ +u16_t net_eth_get_vlan_tag(struct net_if *iface); + +/** + * @brief Return network interface related to this VLAN tag + * + * @param ctx Ethernet context + * @param tag VLAN tag + * + * @return Network interface related to this tag or NULL if no such interface + * exists. 
+ */ +struct net_if *net_eth_get_vlan_iface(struct ethernet_context *ctx, u16_t tag); + +/** + * @brief Check if VLAN is enabled for a specific network interface. + * + * @param ctx Ethernet context + * @param iface Network interface + * + * @return True if VLAN is enabled for this network interface, false if not. + */ +bool net_eth_is_vlan_enabled(struct ethernet_context *ctx, + struct net_if *iface); + +#define ETH_NET_DEVICE_INIT(dev_name, drv_name, init_fn, \ + data, cfg_info, prio, api, mtu) \ + DEVICE_AND_API_INIT(dev_name, drv_name, init_fn, data, \ + cfg_info, POST_KERNEL, prio, api); \ + NET_L2_DATA_INIT(dev_name, 0, NET_L2_GET_CTX_TYPE(ETHERNET_L2)); \ + NET_IF_INIT(dev_name, 0, ETHERNET_L2, mtu, NET_VLAN_MAX_COUNT) + +#else /* CONFIG_NET_VLAN */ + +#define ETH_NET_DEVICE_INIT(dev_name, drv_name, init_fn, \ + data, cfg_info, prio, api, mtu) \ + NET_DEVICE_INIT(dev_name, drv_name, init_fn, \ + data, cfg_info, prio, api, ETHERNET_L2, \ + NET_L2_GET_CTX_TYPE(ETHERNET_L2), mtu) + +static inline int net_eth_vlan_enable(struct net_if *iface, u16_t vlan_tag) +{ + return -EINVAL; +} + +static inline int net_eth_vlan_disable(struct net_if *iface, u16_t vlan_tag) +{ + return -EINVAL; +} + +static inline u16_t net_eth_get_vlan_tag(struct net_if *iface) +{ + return NET_VLAN_TAG_UNSPEC; +} +#endif /* CONFIG_NET_VLAN */ + +/** + * @brief Fill ethernet header in network packet. + * + * @param ctx Ethernet context + * @param pkt Network packet + * @param frag Ethernet header in packet + * @param ptype Upper level protocol type (in network byte order) + * @param src Source ethernet address + * @param dst Destination ethernet address + * + * @return Pointer to ethernet header struct inside net_buf. 
+ */ +struct net_eth_hdr *net_eth_fill_header(struct ethernet_context *ctx, + struct net_pkt *pkt, + struct net_buf *frag, + u32_t ptype, + u8_t *src, + u8_t *dst); + #ifdef __cplusplus } #endif diff --git a/include/net/gptp.h b/include/net/gptp.h new file mode 100644 index 0000000000000..718defc4e97fa --- /dev/null +++ b/include/net/gptp.h @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief Public functions for the Precision Time Protocol Stack. + * + */ + +#ifndef __GPTP_H +#define __GPTP_H + +/** + * @brief generic Precision Time Protocol (gPTP) support + * @defgroup gptp gPTP support + * @ingroup networking + * @{ + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define GPTP_CLOCK_ACCURACY_UNKNOWN 0xFE +#define GPTP_OFFSET_SCALED_LOG_VAR_UNKNOWN 0x436A +#define GPTP_PRIORITY1_NON_GM_CAPABLE 255 +#define GPTP_PRIORITY2_DEFAULT 248 + +/** + * @brief Scaled Nanoseconds. + */ +struct gptp_scaled_ns { + /** High half. */ + s32_t high; + + /** Low half. */ + s64_t low; +} __packed; + +/** + * @brief UScaled Nanoseconds. + */ +struct gptp_uscaled_ns { + /** High half. */ + u32_t high; + + /** Low half. */ + u64_t low; +} __packed; + +/** + * @brief Precision Time Protocol Timestamp format. + * + * This structure represents a timestamp according + * to the Precision Time Protocol standard. + * + * Seconds are encoded on a 48 bits unsigned integer. + * Nanoseconds are encoded on a 32 bits unsigned integer. + */ +struct net_ptp_time { + /** Seconds encoded on 48 bits. */ + union { + struct { +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + u32_t low; + u16_t high; + u16_t unused; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + u16_t unused; + u16_t high; + u32_t low; +#else +#error "Unknown byte order" +#endif + } _sec; + u64_t second; + }; + + /** Nanoseconds. */ + u32_t nanosecond; +}; + +struct net_ptp_extended_timestamp { + /** Seconds encoded on 48 bits. 
*/ + union { + struct { +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + u32_t low; + u16_t high; + u16_t unused; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + u16_t unused; + u16_t high; + u32_t low; +#else +#error "Unknown byte order" +#endif + } _sec; + u64_t second; + }; + + /** Nanoseconds encoded on 48 bits. */ + union { + struct { +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + u32_t low; + u16_t high; + u16_t unused; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + u16_t unused; + u16_t high; + u32_t low; +#else +#error "Unknown byte order" +#endif + } _nsec; + u64_t nanosecond; + }; +}; + +#if defined(CONFIG_NET_GPTP) +#if defined(CONFIG_NEWLIB_LIBC) +#include + +#define GPTP_POW2(exp) pow(2, exp) +#else + +static inline double _gptp_pow2(int exp) +{ + double res = 1; + if (!exp) { + return 1; + } + + if (exp > 0) { + while (exp--) { + res *= 2; + } + } else { + while (exp++) { + res /= 2; + } + } + + return res; +} + +#define GPTP_POW2(exp) _gptp_pow2(exp) +#endif + +/* + * TODO: k_uptime_get need to be replaced by the MAC ptp_clock. + * The ptp_clock access infrastructure is not ready yet + * so use it for the time being. + * k_uptime time precision is in ms. + */ +#define GPTP_GET_CURRENT_TIME_NANOSECOND() (k_uptime_get() * 1000000) +#define GPTP_GET_CURRENT_TIME_USCALED_NS(uscaled_ns_ptr) \ + do { \ + (uscaled_ns_ptr)->low = \ + GPTP_GET_CURRENT_TIME_NANOSECOND() << 16; \ + (uscaled_ns_ptr)->high = 0; \ + } while (0) + +/** + * @typedef gptp_phase_dis_callback_t + * @brief Define callback that is called after a phase discontinuity has been + * sent by the grandmaster. + * @param "u8_t *gm_identity" A pointer to first element of a + * ClockIdentity array. The size of the array is GPTP_CLOCK_ID_LEN. + * @param "u16_t *time_base" A pointer to the value of timeBaseIndicator + * of the current grandmaster. + * @param "struct gptp_scaled_ns *last_gm_ph_change" A pointer to the value of + * lastGmPhaseChange received from grandmaster. 
+ * @param "double *last_gm_freq_change" A pointer to the value of + * lastGmFreqChange received from the grandmaster. + */ +typedef void (*gptp_phase_dis_callback_t)( + u8_t *gm_identity, + u16_t *time_base, + struct gptp_scaled_ns *last_gm_ph_change, + double *last_gm_freq_change); + +/** + * @brief Phase discontinuity callback structure. + * + * Stores the phase discontinuity callback information. Caller must make sure + * that the variable pointed by this is valid during the lifetime of + * registration. Typically this means that the variable cannot be + * allocated from stack. + */ +struct gptp_phase_dis_cb { + /** Node information for the slist. */ + sys_snode_t node; + + /** Phase discontinuity callback. */ + gptp_phase_dis_callback_t cb; +}; + +/** + * @brief Register a phase discontinuity callback. + * + * @param phase_dis Caller specified handler for the callback. + * @param cb Callback to register. + */ +void gptp_register_phase_dis_cb(struct gptp_phase_dis_cb *phase_dis, + gptp_phase_dis_callback_t cb); + +/** + * @brief Unregister a phase discontinuity callback. + * + * @param phase_dis Caller specified handler for the callback. + */ +void gptp_unregister_phase_dis_cb(struct gptp_phase_dis_cb *phase_dis); + +/** + * @brief Call a phase discontinuity callback function. + */ +void gptp_call_phase_dis_cb(void); + +/** + * @brief Get gPTP time. + * + * @param "struct net_ptp_time *slave_time" A pointer to structure where + * timestamp will be saved. + * + * @param "bool *gm_present" A pointer to a boolean where status of the + * presence of a grand master will be saved. + * + * @return Error code. 0 if no error. + */ +int gptp_event_capture(struct net_ptp_time *slave_time, bool *gm_present); + +/** + * @brief Utility function to print clock id to a user supplied buffer. 
+ * + * @param clk_id Clock id + * @param output Output buffer + * @param output_len Output buffer len + * + * @return Pointer to output buffer + */ +char *gptp_sprint_clock_id(const u8_t *clk_id, char *output, + size_t output_len); + +/** + * @typedef gptp_port_cb_t + * @brief Callback used while iterating over gPTP ports + * + * @param port Port number + * @param iface Pointer to network interface + * @param user_data A valid pointer to user data or NULL + */ +typedef void (*gptp_port_cb_t)(int port, struct net_if *iface, + void *user_data); + +/** + * @brief Go through all the gPTP ports and call callback for each of them. + * + * @param cb User-supplied callback function to call + * @param user_data User specified data + */ +void gptp_foreach_port(gptp_port_cb_t cb, void *user_data); + +/** + * @brief Get gPTP domain. + * @details This contains all the configuration / status of the gPTP domain. + * + * @return Pointer to domain or NULL if not found. + */ +struct gptp_domain *gptp_get_domain(void); + +#endif /* CONFIG_NET_GPTP */ + +#ifdef __cplusplus +} +#endif + +/** + * @} + */ + +#endif /* __GPTP_H */ diff --git a/include/net/gptp_data_set.h b/include/net/gptp_data_set.h new file mode 100644 index 0000000000000..7fe866c01879f --- /dev/null +++ b/include/net/gptp_data_set.h @@ -0,0 +1,594 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief PTP data sets + * + * This is not to be included by the application. + */ + +#ifndef __GPTP_DS_H +#define __GPTP_DS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(CONFIG_NET_GPTP) + +#include +#include + +/* Parameters for PTP data sets. */ +#define GPTP_ALLOWED_LOST_RESP 3 + +#if defined(CONFIG_NET_GPTP_NEIGHBOR_PROP_DELAY_THR) +#define GPTP_NEIGHBOR_PROP_DELAY_THR CONFIG_NET_GPTP_NEIGHBOR_PROP_DELAY_THR +#else +/* See IEEE802.1AS B.3 should be less than 800ns (cur: 100us). 
*/ +#define GPTP_NEIGHBOR_PROP_DELAY_THR 100000 +#endif + +/* Max number of ClockIdentities in pathTrace. */ +#define GPTP_MAX_PATHTRACE_SIZE 256 + +/* Helpers to access gptp_domain fields. */ +#define GPTP_PORT_START 1 +#define GPTP_PORT_END (gptp_domain.default_ds.nb_ports + GPTP_PORT_START) + +#define GPTP_PORT_INDEX (port - GPTP_PORT_START) + +#define GPTP_GLOBAL_DS() (&gptp_domain.global_ds) +#define GPTP_DEFAULT_DS() (&gptp_domain.default_ds) +#define GPTP_CURRENT_DS() (&gptp_domain.current_ds) +#define GPTP_PARENT_DS() (&gptp_domain.parent_ds) +#define GPTP_PROPERTIES_DS() (&gptp_domain.properties_ds) +#define GPTP_STATE() (&gptp_domain.state) + +#define GPTP_PORT_DS(port) \ + (&gptp_domain.port_ds[port - GPTP_PORT_START]) +#define GPTP_PORT_STATE(port) \ + (&gptp_domain.port_state[port - GPTP_PORT_START]) +#define GPTP_PORT_BMCA_DATA(port) \ + (&gptp_domain.port_bmca_data[port - GPTP_PORT_START]) +#define GPTP_PORT_IFACE(port) \ + gptp_domain.iface[port - GPTP_PORT_START] +#define GPTP_PORT_DRV(port) \ + net_if_get_device(GPTP_PORT_IFACE(port)) + +#if defined(CONFIG_NET_GPTP_STATISTICS) +#define GPTP_PORT_PARAM_DS(port) \ + (&gptp_domain.port_param_ds[port - GPTP_PORT_START]) +#endif + +#define CLEAR_RESELECT(global_ds, port) \ + global_ds->reselect_array &= (~(1 << (port - 1))) +#define SET_RESELECT(global_ds, port) \ + global_ds->reselect_array |= (1 << (port - 1)) +#define CLEAR_SELECTED(global_ds, port) \ + global_ds->selected_array &= (~(1 << (port - 1))) +#define SET_SELECTED(global_ds, port) \ + global_ds->selected_array |= (1 << (port - 1)) +#define IS_SELECTED(global_ds, port) \ + ((global_ds->selected_array >> (port - 1)) & 0x1) +#define IS_RESELECT(global_ds, port) \ + ((global_ds->reselect_array >> (port - 1)) & 0x1) + +/* + * Global definition of the gPTP domain. + * Note: Only one domain is supported for now. + */ +extern struct gptp_domain gptp_domain; + +/* + * Type of TLV message received. 
+ */ +enum gptp_tlv_type { + GPTP_TLV_MGNT = 0x0001, + GPTP_TLV_MGNT_ERR_STATUS = 0x0002, + GPTP_TLV_ORGANIZATION_EXT = 0x0003, + GPTP_TLV_REQ_UNICAST_TX = 0x0004, + GPTP_TLV_GRANT_UNICAST_TX = 0x0005, + GPTP_TLV_CANCEL_UNICAST_TX = 0x0006, + GPTP_TLV_ACK_CANCEL_UNICAST_TX = 0x0007, + GPTP_TLV_PATH_TRACE = 0x0008, + GPTP_TLV_ALT_TIME_OFFSET_INDICATOR = 0x0008, + GPTP_TLV_AUTH = 0x2000, + GPTP_TLV_AUTH_CHALLENGE = 0x2001, + GPTP_TLV_SECURITY_ASSOC_UPDATE = 0x2002, + GPTP_TLV_CUM_FREQ_SCALE_FACTOR_OFFSET = 0x2003, +}; + +/* + * Class of the local clock used for a port. + * This is used when determining the Grand Master. + */ +enum gptp_clock_class { + GPTP_CLASS_PRIMARY = 6, + GPTP_CLASS_APP_SPECIFIC = 13, + GPTP_CLASS_APP_SPECIFIC_LOST = 14, + GPTP_CLASS_PRIMARY_DEGRADED_A = 52, + GPTP_CLASS_APP_SPECIFIC_DEGRADED_A = 58, + GPTP_CLASS_PRIMARY_DEGRADED_B = 187, + GPTP_CLASS_APP_SPECIFIC_DEGRADED_B = 193, + GPTP_CLASS_OTHER = 248, + GPTP_CLASS_SLAVE_ONLY = 255, +}; + +/* + * For gPTP, only a subset are used. + * - DisabledPort + * - MasterPort + * - PassivePort + * - SlavePort + */ +enum gptp_port_state { + GPTP_PORT_INITIALIZING, + GPTP_PORT_FAULTY, + GPTP_PORT_DISABLED, + GPTP_PORT_LISTENING, + GPTP_PORT_PRE_MASTER, + GPTP_PORT_MASTER, + GPTP_PORT_PASSIVE, + GPTP_PORT_UNCALIBRATED, + GPTP_PORT_SLAVE, +}; + +enum gptp_received_info { + GPTP_RCVD_INFO_SUPERIOR_MASTER_INFO, + GPTP_RCVD_INFO_REPEATED_MASTER_INFO, + GPTP_RCVD_INFO_INFERIOR_MASTER_INFO, + GPTP_RCVD_INFO_OTHER_INFO, +}; + +/** + * @brief Announce path trace retaining structure. + */ +struct gptp_path_trace { + /** Length of the path trace. */ + u16_t len; + + /** Path trace of the announce message. */ + u8_t path_sequence[GPTP_MAX_PATHTRACE_SIZE][GPTP_CLOCK_ID_LEN]; +}; + +/** + * @brief Per-time-aware system global variables. + * + * Not all variables from the standard are defined yet. + * The structure is to be enhanced with missing fields when those are needed. 
+ * + * selectedRole is not defined here as it is a duplicate of the port_state + * variable declared in gptp_port_ds. + */ +struct gptp_global_ds { + /** The synchronized time computed by the ClockSlave entity. */ + struct net_ptp_time sync_receipt_time; + + /** Last Grand Master Phase Change. */ + struct gptp_scaled_ns clk_src_last_gm_phase_change; + + /** Last Grand Master Phase Change. */ + struct gptp_scaled_ns last_gm_phase_change; + + /** Path trace to be sent in announce message. */ + struct gptp_path_trace path_trace; + + /** Grand Master priority vector. */ + struct gptp_priority_vector gm_priority; + + /** Previous Grand Master priority vector. */ + struct gptp_priority_vector last_gm_priority; + + /** Global flags. */ + struct gptp_flags global_flags; + + /** System current flags. */ + struct gptp_flags sys_flags; + + /** Mean time interval between messages providing time-sync info. */ + u64_t clk_master_sync_itv; + + /** Value if current time. */ + u64_t sync_receipt_local_time; + + /** Time provided by the ClockSource entity minus the sync time. */ + s64_t clk_src_phase_offset; + + /** Fractional frequency offset of the Clock Source entity. */ + double clk_src_freq_offset; + + /** Last Grand Master Frequency Change. */ + double clk_src_last_gm_freq_change; + + /** Ratio of the frequency of the ClockSource to the LocalClock. */ + double gm_rate_ratio; + + /** Last Grand Master Frequency Change. */ + double last_gm_freq_change; + + /** Time source. */ + enum gptp_time_source time_source; + + /** System time source. */ + enum gptp_time_source sys_time_source; + + /** Selected port Roles. */ + enum gptp_port_state selected_role[CONFIG_NET_GPTP_NUM_PORTS + 1]; + + /** Reselect port bit array. */ + u32_t reselect_array; + + /** Selected port bit array. */ + u32_t selected_array; + + /** Steps removed from selected master. */ + u16_t master_steps_removed; + + /** Current UTC offset. */ + s16_t current_utc_offset; + + /** System current UTC offset. 
*/ + s16_t sys_current_utc_offset; + + /** Time Base Indicator. */ + u16_t clk_src_time_base_indicator; + + /** Previous Time Base Indicator. */ + u16_t clk_src_time_base_indicator_prev; + + /** Grand Master Time Base Indicator. */ + u16_t gm_time_base_indicator; + + /** A Grand Master is present in the domain. */ + bool gm_present; +}; + +/** + * @brief Default Parameter Data Set. + * + * Data Set representing capabilities of the time-aware system. + */ +struct gptp_default_ds { + /** System current flags. */ + struct gptp_flags flags; + + /** Quality of the local clock. */ + struct gptp_clock_quality clk_quality; + + /* Source of time used by the Grand Master Clock. */ + enum gptp_time_source time_source; + + /** Current UTC offset. */ + u16_t cur_utc_offset; + + /** Defines if this system is Grand Master capable. */ + bool gm_capable; + + /** Clock Identity of the local clock. */ + u8_t clk_id[GPTP_CLOCK_ID_LEN]; + + /** Number of ports of the time-aware system. */ + u8_t nb_ports; + + /** Primary priority of the time-aware system. */ + u8_t priority1; + + /** Secondary priority of the time-aware system. */ + u8_t priority2; +}; + +/** + * @brief Current Parameter Data Set. + * + * Data Set representing information relative to the Grand Master. + */ +struct gptp_current_ds { + /** Last Grand Master Phase change . */ + struct gptp_scaled_ns last_gm_phase_change; + + /** Time difference between a slave and the Grand Master. */ + s64_t offset_from_master; + + /** Last Grand Master Frequency change. */ + double last_gm_freq_change; + + /** Number of times a Grand Master has changed in the domain. */ + u32_t gm_change_count; + + /** Time when the most recent Grand Master changed. */ + u32_t last_gm_chg_evt_time; + + /** Time when the most recent Grand Master phase changed. */ + u32_t last_gm_phase_chg_evt_time; + + /** Time when the most recent Grand Master frequency changed. 
*/ + u32_t last_gm_freq_chg_evt_time; + + /** Time Base Indicator of the current Grand Master. */ + u16_t gm_timebase_indicator; + + /** Number of steps between the local clock and the Grand Master. */ + u8_t steps_removed; +}; + +/** + * @brief Parent Parameter Data Set. + * + * Data Set representing the parent capabilities. + */ +struct gptp_parent_ds { + /** Port Identity of the Master Port attached to this system. */ + struct gptp_port_identity port_id; + + /** Ratio of the frequency of the GM with the local clock. */ + s32_t cumulative_rate_ratio; + + /** Clock Identity of the Grand Master clock. */ + u8_t gm_id[GPTP_CLOCK_ID_LEN]; + + /** Clock Class of the Grand Master clock. */ + struct gptp_clock_quality gm_clk_quality; + + /** Primary Priority of the Grand Master clock. */ + u8_t gm_priority1; + + /** Secondary Priority of the Grand Master clock. */ + u8_t gm_priority2; +}; + +/** + * @brief Time Properties Parameter Data Set. + * + * Data Set representing Grand Master capabilities from the point of view + * of this system. + */ +struct gptp_time_prop_ds { + /** The time source of the Grand Master. */ + enum gptp_time_source time_source; + + /** Current UTC offset for the Grand Master. */ + u16_t cur_utc_offset; + + /** Current UTC offset valid for the Grand Master. */ + bool cur_utc_offset_valid : 1; + + /** The Grand Master will have 59s at the end of the current UTC day. + */ + bool leap59 : 1; + + /** The Grand Master will have 61s at the end of the current UTC day. + */ + bool leap61 : 1; + + /** The current UTC offset of the GM is traceable to a primary ref. */ + bool time_traceable : 1; + + /** The frequency of the Grand Master is traceable to a primary ref. */ + bool freq_traceable : 1; +}; + +/** + * @brief Port Parameter Data Set. + * + * Data Set representing port capabilities. + */ +struct gptp_port_ds { + /** Port Identity of the port. */ + struct gptp_port_identity port_id; + + /** Sync event transmission interval for the port. 
*/ + struct gptp_uscaled_ns half_sync_itv; + + /** Path Delay Request transmission interval for the port. */ + struct gptp_uscaled_ns pdelay_req_itv; + + /** Maximum interval between sync messages. */ + u64_t sync_receipt_timeout_time_itv; + + /** Asymmetry on the link relative to the grand master time base. */ + s64_t delay_asymmetry; + + /** One way propagation time on the link attached to this port. */ + double neighbor_prop_delay; + + /** Propagation time threshold for the link attached to this port. */ + double neighbor_prop_delay_thresh; + + /** Estimate of the ratio of the frequency with the peer. */ + double neighbor_rate_ratio; + + /** Maximum number of Path Delay Requests without a response. */ + u16_t allowed_lost_responses; + + /** Current Sync sequence id for this port. */ + u16_t sync_seq_id; + + /** Current Path Delay Request sequence id for this port. */ + u16_t pdelay_req_seq_id; + + /** Current Announce sequence id for this port. */ + u16_t announce_seq_id; + + /** Current Signaling sequence id for this port. */ + u16_t signaling_seq_id; + + /** Time synchronization and Best Master Selection enabled. */ + bool ptt_port_enabled : 1; + + /** Previous status of ptt_port_enabled. */ + bool prev_ptt_port_enabled : 1; + + /** The port is measuring the path delay. */ + bool is_measuring_delay : 1; + + /** The port is capable of running IEEE802.1AS. */ + bool as_capable : 1; + + /** Whether neighborRateRatio needs to be computed for this port. */ + bool compute_neighbor_rate_ratio : 1; + + /** Whether neighborPropDelay needs to be computed for this port. */ + bool compute_neighbor_prop_delay : 1; + + /** Initial Announce Interval as a Logarithm to base 2. */ + s8_t ini_log_announce_itv; + + /** Current Announce Interval as a Logarithm to base 2. */ + s8_t cur_log_announce_itv; + + /** Time without receiving announce messages before running BMCA. */ + u8_t announce_receipt_timeout; + + /** Initial Sync Interval as a Logarithm to base 2. 
*/ + s8_t ini_log_half_sync_itv; + + /** Current Sync Interval as a Logarithm to base 2. */ + s8_t cur_log_half_sync_itv; + + /** Time without receiving sync messages before running BMCA. */ + u8_t sync_receipt_timeout; + + /** Initial Path Delay Request Interval as a Logarithm to base 2. */ + s8_t ini_log_pdelay_req_itv; + + /** Current Path Delay Request Interval as a Logarithm to base 2. */ + s8_t cur_log_pdelay_req_itv; + + /** Version of PTP running on this port. */ + u8_t version; +}; + +/** + * @brief Port Parameter Statistics. + * + * Data Set containing statistics associated with various events. + */ +struct gptp_port_param_ds { + /** Number of Sync messages received. */ + u32_t rx_sync_count; + + /** Number of Follow Up messages received. */ + u32_t rx_fup_count; + + /** Number of Path Delay Requests messages received. */ + u32_t rx_pdelay_req_count; + + /** Number of Path Delay Response messages received. */ + u32_t rx_pdelay_resp_count; + + /** Number of Path Delay Follow Up messages received. */ + u32_t rx_pdelay_resp_fup_count; + + /** Number of Announce messages received. */ + u32_t rx_announce_count; + + /** Number of ptp messages discarded. */ + u32_t rx_ptp_packet_discard_count; + + /** Number of Sync reception timeout. */ + u32_t sync_receipt_timeout_count; + + /** Number of Announce reception timeout. */ + u32_t announce_receipt_timeout_count; + + /** Number Path Delay Requests without a response. */ + u32_t pdelay_allowed_lost_resp_exceed_count; + + /** Number of Sync messages sent. */ + u32_t tx_sync_count; + + /** Number of Follow Up messages sent. */ + u32_t tx_fup_count; + + /** Number of Path Delay Request messages sent. */ + u32_t tx_pdelay_req_count; + + /** Number of Path Delay Response messages sent. */ + u32_t tx_pdelay_resp_count; + + /** Number of Path Delay Response messages sent. */ + u32_t tx_pdelay_resp_fup_count; + + /** Number of Announce messages sent. 
*/ + u32_t tx_announce_count; + + /** Neighbor propagation delay threshold exceeded. */ + u32_t neighbor_prop_delay_exceeded; +}; + +/** + * @brief gPTP domain. + * + * Data Set containing all the information necessary to represent + * one time-aware system domain. + */ +struct gptp_domain { + /** Global Data Set for this gPTP domain. */ + struct gptp_global_ds global_ds; + + /** Default Data Set for this gPTP domain. */ + struct gptp_default_ds default_ds; + + /** Current Data Set for this gPTP domain. */ + struct gptp_current_ds current_ds; + + /** Parent Data Set for this gPTP domain. */ + struct gptp_parent_ds parent_ds; + + /** Time Properties Data Set for this gPTP domain. */ + struct gptp_time_prop_ds properties_ds; + + /** Current State of the MI State Machines for this gPTP domain. */ + struct gptp_states state; + + /** Port Parameter Data Sets for this gPTP domain. */ + struct gptp_port_ds port_ds[CONFIG_NET_GPTP_NUM_PORTS]; + +#if defined(CONFIG_NET_GPTP_STATISTICS) + /** Port Parameter Statistics Data Sets for this gPTP domain. */ + struct gptp_port_param_ds port_param_ds[CONFIG_NET_GPTP_NUM_PORTS]; +#endif /* CONFIG_NET_GPTP_STATISTICS */ + + /** Current States of the MD State Machines for this gPTP domain. */ + struct gptp_port_states port_state[CONFIG_NET_GPTP_NUM_PORTS]; + + /** Shared data between BMCA State Machines for this gPTP domain. */ + struct gptp_port_bmca_data port_bmca_data[CONFIG_NET_GPTP_NUM_PORTS]; + + /* Network interface linked to the PTP PORT. */ + struct net_if *iface[CONFIG_NET_GPTP_NUM_PORTS]; +}; + +/** + * @brief Get port specific data from gPTP domain. + * @details This contains all the configuration / status of the gPTP domain. 
+ * + * @param domain gPTP domain + * @param port Port id + * @param port_ds Port specific parameter data set (returned to caller) + * @param port_param_ds Port parameter statistics data set (returned to caller) + * @param port_state Port specific states data set (returned to caller) + * @param port_bmca_data Port BMCA state machine specific data (returned to caller) + * @param iface Network interface attached to this port (returned to caller) + * + * @return 0 if ok, < 0 if error + */ +int gptp_get_port_data(struct gptp_domain *domain, int port, + struct gptp_port_ds **port_ds, + struct gptp_port_param_ds **port_param_ds, + struct gptp_port_states **port_state, + struct gptp_port_bmca_data **port_bmca_data, + struct net_if **iface); + +#endif /* CONFIG_NET_GPTP */ + +#ifdef __cplusplus +} +#endif + +#endif /* __GPTP_DS_H */ diff --git a/include/net/gptp_md.h b/include/net/gptp_md.h new file mode 100644 index 0000000000000..06ff11258fdd0 --- /dev/null +++ b/include/net/gptp_md.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief GPTP Media Dependent interface for full duplex and point to point + * + * This is not to be included by the application. + */ + +#ifndef __GPTP_MD_H +#define __GPTP_MD_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(CONFIG_NET_GPTP) + +#include + +/** + * @brief Media Dependent Sync Information. + * + * This structure applies for MDSyncReceive as well as MDSyncSend. + */ +struct gptp_md_sync_info { + /** PortIdentity of this port. */ + struct gptp_port_identity src_port_id; + + /* Time of the current grandmaster compared to the previous. */ + struct gptp_scaled_ns last_gm_phase_change; + + /** Most recent preciseOriginTimestamp from the PortSyncSync. */ + struct net_ptp_time precise_orig_ts; + + /** Most recent followupCorrectionField from the PortSyncSync. */ + s64_t follow_up_correction_field; + + /** Most recent upstreamTxTime from the PortSyncSync. 
*/ + u64_t upstream_tx_time; + + /* Frequency of the current grandmaster compared to the previous. */ + double last_gm_freq_change; + + /** Most recent rateRatio from the PortSyncSync. */ + double rate_ratio; + + /* Time Base Indicator of the current Grand Master. */ + u16_t gm_time_base_indicator; + + /** Current Log Sync Interval for this port. */ + s8_t log_msg_interval; +}; + +/** + * @brief Initialize all Media Dependent State Machines. + */ +void gptp_md_init_state_machine(void); + +/** + * @brief Run all Media Dependent State Machines. + * + * @param port Number of the port the State Machines needs to be run on. + */ +void gptp_md_state_machines(int port); + +#endif /* CONFIG_NET_GPTP */ + +#ifdef __cplusplus +} +#endif + +#endif /* __GPTP_MD_H */ diff --git a/include/net/gptp_messages.h b/include/net/gptp_messages.h new file mode 100644 index 0000000000000..7ac61c4c19dcb --- /dev/null +++ b/include/net/gptp_messages.h @@ -0,0 +1,600 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief gPTP message helpers. + * + * This is not to be included by the application. + */ + +#ifndef __GPTP_MESSAGES_H +#define __GPTP_MESSAGES_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(CONFIG_NET_GPTP) + +#include +#include + +#define GPTP_CLOCK_ID_LEN 8 + +/* Helpers to access gPTP messages. 
*/ +#define GPTP_HDR(pkt) ((struct gptp_hdr *)net_pkt_ip_data(pkt)) +#define GPTP_ANNOUNCE(pkt) ((struct gptp_announce *)gptp_data(pkt)) +#define GPTP_SIGNALING(pkt) ((struct gptp_signaling *)gptp_data(pkt)) +#define GPTP_SYNC(pkt) ((struct gptp_sync *)gptp_data(pkt)) +#define GPTP_FOLLOW_UP(pkt) ((struct gptp_follow_up *)gptp_data(pkt)) +#define GPTP_DELAY_REQ(pkt) \ + ((struct gptp_delay_req *)gptp_data(pkt)) +#define GPTP_PDELAY_REQ(pkt) \ + ((struct gptp_pdelay_req *)gptp_data(pkt)) +#define GPTP_PDELAY_RESP(pkt) \ + ((struct gptp_pdelay_resp *)gptp_data(pkt)) +#define GPTP_PDELAY_RESP_FOLLOWUP(pkt) \ + ((struct gptp_pdelay_resp_follow_up *)gptp_data(pkt)) + +/* Field values. */ +#define GPTP_TRANSPORT_802_1_AS 0x1 +#define GPTP_VERSION 0x2 + +/* Message types. */ +#define GPTP_SYNC_MESSAGE 0x0 +#define GPTP_DELAY_REQ_MESSAGE 0x1 +#define GPTP_PATH_DELAY_REQ_MESSAGE 0x2 +#define GPTP_PATH_DELAY_RESP_MESSAGE 0x3 +#define GPTP_FOLLOWUP_MESSAGE 0x8 +#define GPTP_DELAY_RESP_MESSAGE 0x9 +#define GPTP_PATH_DELAY_FOLLOWUP_MESSAGE 0xA +#define GPTP_ANNOUNCE_MESSAGE 0xB +#define GPTP_SIGNALING_MESSAGE 0xC +#define GPTP_MANAGEMENT_MESSAGE 0xD + +/* Message Lengths. 
*/ +#define GPTP_PACKET_LEN(pkt) net_pkt_get_len(pkt) +#define GPTP_VALID_LEN(pkt, len) \ + (len > (NET_ETH_MINIMAL_FRAME_SIZE - GPTP_L2_HDR_LEN(pkt))) +#define GPTP_L2_HDR_LEN(pkt) \ + ((int)GPTP_HDR(pkt) - (int)NET_ETH_HDR(pkt)) + +#define GPTP_SYNC_LEN \ + (sizeof(struct gptp_hdr) + sizeof(struct gptp_sync)) +#define GPTP_FOLLOW_UP_LEN \ + (sizeof(struct gptp_hdr) + sizeof(struct gptp_follow_up)) +#define GPTP_PDELAY_REQ_LEN \ + (sizeof(struct gptp_hdr) + sizeof(struct gptp_pdelay_req)) +#define GPTP_PDELAY_RESP_LEN \ + (sizeof(struct gptp_hdr) + sizeof(struct gptp_pdelay_resp)) +#define GPTP_PDELAY_RESP_FUP_LEN \ + (sizeof(struct gptp_hdr) + sizeof(struct gptp_pdelay_resp_follow_up)) +#define GPTP_SIGNALING_LEN \ + (sizeof(struct gptp_hdr) + sizeof(struct gptp_signaling)) + +/* For the Announce message, the TLV is variable length. The len field + * indicates the length of the TLV not accounting for tlvType and lengthField + * which are 4 bytes. + */ +#define GPTP_ANNOUNCE_LEN(pkt) \ + (sizeof(struct gptp_hdr) + sizeof(struct gptp_announce) \ + + ntohs(GPTP_ANNOUNCE(pkt)->tlv.len) \ + - sizeof(struct gptp_path_trace_tlv) + 4) + +#define GPTP_CHECK_LEN(pkt, len) \ + ((GPTP_PACKET_LEN(pkt) != len) && (GPTP_VALID_LEN(pkt, len))) +#define GPTP_ANNOUNCE_CHECK_LEN(pkt) \ + ((GPTP_PACKET_LEN(pkt) != GPTP_ANNOUNCE_LEN(pkt)) && \ + (GPTP_VALID_LEN(pkt, GPTP_ANNOUNCE_LEN(pkt)))) + +/* Header Flags. Byte 0. */ +#define GPTP_FLAG_TWO_STEP 0x02 + +/* Header Flags. Byte 1. */ +#define GPTP_FLAG_LEAP61 0x01 +#define GPTP_FLAG_LEAP59 0x02 +#define GPTP_FLAG_CUR_UTC_OFF_VALID 0x04 +#define GPTP_FLAG_PTP_TIMESCALE 0x08 +#define GPTP_FLAG_TIME_TRACEABLE 0x10 +#define GPTP_FLAG_FREQ_TRACEABLE 0x20 + +/* Signaling Interval Flags. */ +#define GPTP_FLAG_COMPUTE_NEIGHBOR_RATE_RATIO 0x1 +#define GPTP_FLAG_COMPUTE_NEIGHBOR_PROP_DELAY 0x2 + +/* Signaling Interval Values. */ +#define GPTP_ITV_KEEP -128 +#define GPTP_ITV_SET_TO_INIT 126 +#define GPTP_ITV_STOP 127 + +/* Control. 
Only set for header compatibility with v1. */ +#define GPTP_SYNC_CONTROL_VALUE 0x0 +#define GPTP_FUP_CONTROL_VALUE 0x2 +#define GPTP_OTHER_CONTROL_VALUE 0x5 + +/* Other default values. */ +#define GPTP_RESP_LOG_MSG_ITV 0x7F +#define GPTP_ANNOUNCE_MSG_PATH_SEQ_TYPE htons(0x8) + +/* Organization Id used for TLV. */ +#define GPTP_FUP_TLV_ORG_ID_BYTE_0 0x00 +#define GPTP_FUP_TLV_ORG_ID_BYTE_1 0x80 +#define GPTP_FUP_TLV_ORG_ID_BYTE_2 0xC2 +#define GPTP_FUP_TLV_ORG_SUB_TYPE 1 + +/** + * @brief gPTP Clock Quality + * + * Defines the quality of a clock. + * This is used by the Best Master Clock Algorithm. + */ +struct gptp_clock_quality { + u8_t clock_class; + u8_t clock_accuracy; + u16_t offset_scaled_log_var; +} __packed; + +/** + * @brief gPTP Root System Identity + * + * Defines the Grand Master of a clock. + * This is used by the Best Master Clock Algorithm. + */ +struct gptp_root_system_identity { + /** Grand Master priority1 component. */ + u8_t grand_master_prio1; + + /** Grand Master clock quality. */ + struct gptp_clock_quality clk_quality; + + /** Grand Master priority2 component. */ + u8_t grand_master_prio2; + + /** Grand Master clock identity. */ + u8_t grand_master_id[GPTP_CLOCK_ID_LEN]; +} __packed; + +/** + * @brief Port Identity. + */ +struct gptp_port_identity { + /** Clock identity of the port. */ + u8_t clk_id[GPTP_CLOCK_ID_LEN]; + + /** Number of the port. */ + u16_t port_number; +} __packed; + +struct gptp_flags { + union { + /** Bit access. */ + struct { + u8_t reserved0 : 1; + + /** Two step flag. */ + u8_t two_step : 1; + + u8_t reserved1 : 6; + + /** Leap61 flag. */ + u8_t leap61 :1; + + /** Leap59 flag. */ + u8_t leap59 :1; + + /** Current UTC offset valid flag. */ + u8_t current_utc_offset_valid :1; + + /** PTP timescale flag. */ + u8_t ptp_timescale :1; + + /** Time traceable flag. */ + u8_t time_traceable :1; + + /** Frequency traceable flag. */ + u8_t freq_traceable :1; + }; + + /** Byte access. 
*/ + u8_t octets[2]; + + /** Whole field access. */ + u16_t all; + }; +} __packed; + +/* Definition of all message types as defined by IEEE802.1AS. */ + +struct gptp_hdr { + /** Type of the message. */ + u8_t message_type:4; + + /** Transport specific, always 1. */ + u8_t transport_specific:4; + + /** Version of the PTP, always 2. */ + u8_t ptp_version:4; + + /** Reserved field. */ + u8_t reserved0:4; + + /** Total length of the message from the header to the last TLV. */ + u16_t message_length; + + /** Domain number, always 0. */ + u8_t domain_number; + + /** Reserved field. */ + u8_t reserved1; + + /** Message flags. */ + struct gptp_flags flags; + + /** Correction Field. The content depends of the message type. */ + s64_t correction_field; + + /** Reserved field. */ + u32_t reserved2; + + /** Port Identity of the sender. */ + struct gptp_port_identity port_id; + + /** Sequence Id. */ + u16_t sequence_id; + + /** Control value. Sync: 0, Follow-up: 2, Others: 5. */ + u8_t control; + + /** Message Interval in Log2 for Sync and Announce messages. */ + s8_t log_msg_interval; +} __packed; + +struct gptp_path_trace_tlv { + /** TLV type: 0x8. */ + u16_t type; + + /** Length. Number of TLVs * 8 bytes. */ + u16_t len; + + /** ClockIdentity array of the successive time-aware systems. */ + u8_t path_sequence[1][8]; +} __packed; + +struct gptp_announce { + /** Reserved fields. */ + u8_t reserved1[10]; + + /** Current UTC offset. */ + s16_t cur_utc_offset; + + /** Reserved field. */ + u8_t reserved2; + + /* gmPriorityVector priority 1 of the peer sending the message. */ + struct gptp_root_system_identity root_system_id; + + /** masterStepsRemoved of the peer sending the message. */ + u16_t steps_removed; + + /** timeSource of the peer sending the message. */ + u8_t time_source; + + /* Path Trace TLV. This field has a variable length. */ + struct gptp_path_trace_tlv tlv; +} __packed; + +struct gptp_sync { + /** Reserved field. This field is used for PTPv2, unused in gPTP. 
*/ + u8_t reserved[10]; +} __packed; + +struct gptp_follow_up_tlv { + /** TLV type: 0x3. */ + u16_t type; + + /** Length: 28. */ + u16_t len; + + /** Organization Id: 00-80-C2. */ + u8_t org_id[3]; + + /** Organization Sub Type: 1. */ + u8_t org_sub_type[3]; + + /** Rate ratio relative to the grand master of the peer. */ + s32_t cumulative_scaled_rate_offset; + + /** Time Base Indicator of the current Grand Master. */ + u16_t gm_time_base_indicator; + + /** Difference of the time between the current GM and the previous. */ + struct gptp_scaled_ns last_gm_phase_change; + + /** Diff of the frequency between the current GM and the previous. */ + s32_t scaled_last_gm_freq_change; +} __packed; + +struct gptp_follow_up { + /** Higher 16 bits of the seconds at which the sync was sent. */ + u16_t prec_orig_ts_secs_high; + + /** Lower 32 bits of the seconds at which the sync was sent. */ + u32_t prec_orig_ts_secs_low; + + /** Nanoseconds at which the sync was sent. */ + u32_t prec_orig_ts_nsecs; + + /** Follow up TLV. */ + struct gptp_follow_up_tlv tlv; +} __packed; + +struct gptp_pdelay_req { + /** Reserved fields. */ + u8_t reserved1[10]; + + /** Reserved fields. */ + u8_t reserved2[10]; +} __packed; + +struct gptp_pdelay_resp { + /** Higher 16 bits of the seconds at which the request was received. */ + u16_t req_receipt_ts_secs_high; + + /** Lower 32 bits of the seconds at which the request was received. */ + u32_t req_receipt_ts_secs_low; + + /** Nanoseconds at which the pdelay request was received. */ + u32_t req_receipt_ts_nsecs; + + /** Source Port Id of the Path Delay Request. */ + struct gptp_port_identity requesting_port_id; +} __packed; + +struct gptp_pdelay_resp_follow_up { + /** Higher 16 bits of the seconds at which the response was sent. */ + u16_t resp_orig_ts_secs_high; + + /** Lower 32 bits of the seconds at which the response was sent. */ + u32_t resp_orig_ts_secs_low; + + /** Nanoseconds at which the response was received. 
*/ + u32_t resp_orig_ts_nsecs; + + /** Source Port Id of the Path Delay Request. */ + struct gptp_port_identity requesting_port_id; +} __packed; + +struct gptp_message_itv_req_tlv { + /** TLV type: 0x3. */ + u16_t type; + + /** Length field: 12. */ + u16_t len; + + /** Organization Id: 00-80-C2. */ + u8_t org_id[3]; + + /** Organization sub type: 0x2. */ + u8_t org_sub_type[3]; + + /** Log to base 2 of the mean time interval between pdelay requests. */ + s8_t link_delay_itv; + + /** Log to base 2 of the mean time interval between syncs. */ + s8_t time_sync_itv; + + /** Log to base 2 of the mean time interval between announces. */ + s8_t announce_itv; + + /** Flags (computeNeighborRateRatio and computeNeighborPropDelay). */ + union { + struct { + u8_t compute_neighbor_rate_ratio : 1; + u8_t compute_neighbor_prop_delay : 1; + }; + u8_t flags; + }; + /** Reserved fields. */ + u8_t reserved[2]; +} __packed; + +struct gptp_signaling { + /** Target Port Identity , always 0xFF. */ + struct gptp_port_identity target_port_id; + + /** Message Interval TLV. */ + struct gptp_message_itv_req_tlv tlv; +} __packed; + +/** + * @brief Compute gPTP message location. + * + * @param pkt Network Buffer containing a gPTP message. + * + * @return Pointer to the start of the gPTP message inside the packet. + */ +static inline u8_t *gptp_data(struct net_pkt *pkt) +{ + return &pkt->frags->data[sizeof(struct gptp_hdr)]; +} + +/* Functions to prepare messages. */ + +/** + * @brief Prepare Sync message. + * + * @param port gPTP port number. + * + * @return Pointer to the prepared Network Buffer. + */ +struct net_pkt *gptp_prepare_sync(int port); + +/** + * @brief Prepare Follow Up message. + * + * @param port gPTP port number. + * + * @return Pointer to the prepared Network Buffer. + */ +struct net_pkt *gptp_prepare_follow_up(int port, struct net_pkt *sync); + +/** + * @brief Prepare Path Delay Request message. + * + * @param port gPTP port number. 
+ * + * @return Pointer to the prepared Network Buffer. + */ +struct net_pkt *gptp_prepare_pdelay_req(int port); + +/** + * @brief Prepare Path Delay Response message. + * + * @param port gPTP port number. + * @param req Path Delay Request to reply to. + * + * @return Pointer to the prepared Network Buffer. + */ +struct net_pkt *gptp_prepare_pdelay_resp(int port, + struct net_pkt *req); + +/** + * @brief Prepare Announce message. + * + * @param port gPTP port number. + * + * @return Pointer to the prepared Network Buffer. + */ +struct net_pkt *gptp_prepare_announce(int port); + +/** + * @brief Prepare Path Delay Response message. + * + * @param port gPTP port number. + * @param resp Related Path Delay Follow Up. + * + * @return Pointer to the prepared Network Buffer. + */ +struct net_pkt *gptp_prepare_pdelay_follow_up(int port, + struct net_pkt *resp); + +/* Functions to handle received messages. */ + +/** + * @brief Handle Sync message. + * + * @param port gPTP port number. + * @param pkt Network Buffer. + */ +void gptp_handle_sync(int port, struct net_pkt *pkt); + +/** + * @brief Handle Follow Up message. + * + * @param port gPTP port number. + * @param pkt Network Buffer to parse. + * + * @return 0 if success, Error Code otherwise. + */ +int gptp_handle_follow_up(int port, struct net_pkt *pkt); + +/** + * @brief Handle Path Delay Request message. + * + * @param port gPTP port number. + * @param pkt Network Buffer. + */ +void gptp_handle_pdelay_req(int port, struct net_pkt *pkt); + +/** + * @brief Handle Path Delay Response message. + * + * @param port gPTP port number. + * @param pkt Network Buffer to parse. + * + * @return 0 if success, Error Code otherwise. + */ +int gptp_handle_pdelay_resp(int port, struct net_pkt *pkt); + +/** + * @brief Handle Path Delay Follow Up message. + * + * @param port gPTP port number. + * @param pkt Network Buffer to parse. + * + * @return 0 if success, Error Code otherwise. 
+ */ +int gptp_handle_pdelay_follow_up(int port, struct net_pkt *pkt); + +/** + * @brief Handle Signaling message. + * + * @param port gPTP port number. + * @param pkt Network Buffer + */ +void gptp_handle_signaling(int port, struct net_pkt *pkt); + +/* Functions to send messages. */ + +/** + * @brief Send a Sync message. + * + * @param port gPTP port number. + * @param pkt Sync message. + */ +void gptp_send_sync(int port, struct net_pkt *pkt); + +/** + * @brief Send a Follow Up message. + * + * @param port gPTP port number. + * @param pkt Follow Up message. + */ +void gptp_send_follow_up(int port, struct net_pkt *pkt); + +/** + * @brief Send an Announce message. + * + * @param port gPTP port number. + * @param pkt Announce message. + */ +void gptp_send_announce(int port, struct net_pkt *pkt); + +/** + * @brief Send a Path Delay Request on the given port. + * + * @param port gPTP port number. + */ +void gptp_send_pdelay_req(int port); + +/** + * @brief Send a Path Delay Response for the given Path Delay Request. + * + * @param port gPTP port number. + * @param pkt Network Buffer containing the prepared Path Delay Response. + * @param treq Time at which the Path Delay Request was received. + */ +void gptp_send_pdelay_resp(int port, struct net_pkt *pkt, + struct net_ptp_time *treq); + +/** + * @brief Send a Path Delay Response for the given Path Delay Request. + * + * @param port gPTP port number. + * @param pkt Network Buffer containing the prepared Path Delay Follow Up. + * @param tresp Time at which the Path Delay Response was sent. + */ +void gptp_send_pdelay_follow_up(int port, struct net_pkt *pkt, + struct net_ptp_time *tresp); + +#endif /* CONFIG_NET_GPTP */ + +#ifdef __cplusplus +} +#endif + +#endif /* __GPTP_MESSAGES_H */ diff --git a/include/net/gptp_mi.h b/include/net/gptp_mi.h new file mode 100644 index 0000000000000..e76d816d1e096 --- /dev/null +++ b/include/net/gptp_mi.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2017 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief GPTP Media Independent interface + * + * This is not to be included by the application. + */ + +#ifndef __GPTP_MI_H +#define __GPTP_MI_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(CONFIG_NET_GPTP) + +#include + +/** + * @brief Media Independent Sync Information. + * + * This structure applies for MDSyncReceive as well as MDSyncSend. + */ +struct gptp_mi_port_sync_sync { + /** Port to which the Sync Information belongs to. */ + u16_t local_port_number; + + /** Time at which the sync receipt timeout occurs. */ + u64_t sync_receipt_timeout_time; + + /** Copy of the gptp_md_sync_info to be transmitted. */ + struct gptp_md_sync_info sync_info; +}; + +/** + * @brief Initialize all Media Independent State Machines. + */ +void gptp_mi_init_state_machine(void); + +/** + * @brief Run all Media Independent Port Sync State Machines. + * + * @param port Number of the port the State Machines needs to be run on. + */ +void gptp_mi_port_sync_state_machines(int port); + +/** + * @brief Run all Media Independent Port BMCA State Machines. + * + * @param port Number of the port the State Machines needs to be run on. + */ +void gptp_mi_port_bmca_state_machines(int port); + +/** + * @brief Run all Media Independent State Machines. + */ +void gptp_mi_state_machines(void); + +#endif /* CONFIG_NET_GPTP */ + +#ifdef __cplusplus +} +#endif + +#endif /* __GPTP_MI_H */ diff --git a/include/net/gptp_state.h b/include/net/gptp_state.h new file mode 100644 index 0000000000000..243edcb5001c8 --- /dev/null +++ b/include/net/gptp_state.h @@ -0,0 +1,534 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** @file + * @brief PTP state machines + * + * This is not to be included by the application. 
+ */ + +#ifndef __GPTP_STATE_H +#define __GPTP_STATE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(CONFIG_NET_GPTP) + +#include + +/* PDelayRequest states. */ +enum gptp_pdelay_req_states { + GPTP_PDELAY_REQ_NOT_ENABLED, + GPTP_PDELAY_REQ_INITIAL_SEND_REQ, + GPTP_PDELAY_REQ_RESET, + GPTP_PDELAY_REQ_SEND_REQ, + GPTP_PDELAY_REQ_WAIT_RESP, + GPTP_PDELAY_REQ_WAIT_FOLLOW_UP, + GPTP_PDELAY_REQ_WAIT_ITV_TIMER, +}; + +/* Path Delay Response states. */ +enum gptp_pdelay_resp_states { + GPTP_PDELAY_RESP_NOT_ENABLED, + GPTP_PDELAY_RESP_INITIAL_WAIT_REQ, + GPTP_PDELAY_RESP_WAIT_REQ, + GPTP_PDELAY_RESP_WAIT_TSTAMP, +}; + +/* SyncReceive states. */ +enum gptp_sync_rcv_states { + GPTP_SYNC_RCV_DISCARD, + GPTP_SYNC_RCV_WAIT_SYNC, + GPTP_SYNC_RCV_WAIT_FOLLOW_UP, +}; + +/* SyncSend states. */ +enum gptp_sync_send_states { + GPTP_SYNC_SEND_INITIALIZING, + GPTP_SYNC_SEND_SEND_SYNC, + GPTP_SYNC_SEND_SEND_FUP, +}; + +/* PortSyncSyncReceive states. */ +enum gptp_pss_rcv_states { + GPTP_PSS_RCV_DISCARD, + GPTP_PSS_RCV_RECEIVED_SYNC, +}; + +/* PortSyncSyncSend states. */ +enum gptp_pss_send_states { + GPTP_PSS_SEND_TRANSMIT_INIT, + GPTP_PSS_SEND_SYNC_RECEIPT_TIMEOUT, + GPTP_PSS_SEND_SEND_MD_SYNC, + GPTP_PSS_SEND_SET_SYNC_RECEIPT_TIMEOUT, +}; + +/* SiteSyncSyncReceive states. */ +enum gptp_site_sync_sync_states { + GPTP_SSS_INITIALIZING, + GPTP_SSS_RECEIVING_SYNC, +}; + +/* ClockSlaveSync states. */ +enum gptp_clk_slave_sync_states { + GPTP_CLK_SLAVE_SYNC_INITIALIZING, + GPTP_CLK_SLAVE_SYNC_SEND_SYNC_IND, +}; + +/* PortAnnounceReceive states. */ +enum gptp_pa_rcv_states { + GPTP_PA_RCV_DISCARD, + GPTP_PA_RCV_RECEIVE, +}; + +/* PortAnnounceInformation states. */ +enum gptp_pa_info_states { + GPTP_PA_INFO_DISABLED, + /* State to handle the transition after DISABLED state. 
*/ + GPTP_PA_INFO_POST_DISABLED, + GPTP_PA_INFO_AGED, + GPTP_PA_INFO_UPDATE, + GPTP_PA_INFO_CURRENT, + GPTP_PA_INFO_RECEIVE, + GPTP_PA_INFO_SUPERIOR_MASTER_PORT, + GPTP_PA_INFO_REPEATED_MASTER_PORT, + GPTP_PA_INFO_INFERIOR_MASTER_OR_OTHER_PORT, +}; + +/* PortRoleSelection states. */ +enum gptp_pr_selection_states { + GPTP_PR_SELECTION_INIT_BRIDGE, + GPTP_PR_SELECTION_ROLE_SELECTION, +}; + +/* PortAnnounceTransmit states. */ +enum gptp_pa_transmit_states { + GPTP_PA_TRANSMIT_INIT, + GPTP_PA_TRANSMIT_PERIODIC, + GPTP_PA_TRANSMIT_IDLE, + GPTP_PA_TRANSMIT_POST_IDLE, +}; + +/* ClockMasterSyncReceive states. */ +enum gptp_cms_rcv_states { + GPTP_CMS_RCV_INITIALIZING, + GPTP_CMS_RCV_WAITING, + GPTP_CMS_RCV_SOURCE_TIME, +}; + +/* Info_is enumeration2. */ +enum gptp_info_is { + GPTP_INFO_IS_RECEIVED, + GPTP_INFO_IS_MINE, + GPTP_INFO_IS_AGED, + GPTP_INFO_IS_DISABLED, +}; + +enum gptp_time_source { + GPTP_TS_ATOMIC_CLOCK = 0x10, + GPTP_TS_GPS = 0x20, + GPTP_TS_TERRESTRIAL_AUDIO = 0x30, + GPTS_TS_PTP = 0x40, + GPTP_TS_NTP = 0x50, + GPTP_TS_HAND_SET = 0x60, + GPTP_TS_OTHER = 0x90, + GPTP_TS_INTERNAL_OSCILLATOR = 0xA0, +}; + +/** + * @brief gPTP time-synchronization spanning tree priority vector + * + * Defines the best master selection information. + */ +struct gptp_priority_vector { + /** Identity of the source clock. */ + struct gptp_root_system_identity root_system_id; + + /** Port identity of the transmitting time-aware system. */ + struct gptp_port_identity src_port_id; + + /** portNumber of the receiving port. */ + u16_t port_number; + + /** Steps removed from the announce message transmitter and the + * master clock. + */ + u16_t steps_removed; +} __packed; + +/* Pdelay Request state machine variables. */ +struct gptp_pdelay_req_state { + /** Initial Path Delay Response Peer Timestamp. */ + u64_t ini_resp_evt_tstamp; + + /** Initial Path Delay Response Ingress Timestamp. */ + u64_t ini_resp_ingress_tstamp; + + /** Timer for the Path Delay Request. 
*/ + struct k_timer pdelay_timer; + + /** Pointer to the received Path Delay Response. */ + struct net_pkt *rcvd_pdelay_resp_ptr; + + /** Pointer to the received Path Delay Follow Up. */ + struct net_pkt *rcvd_pdelay_follow_up_ptr; + + /** Pointer to the Path Delay Request to be transmitted. */ + struct net_pkt *tx_pdelay_req_ptr; + + /** Current state of the state machine. */ + enum gptp_pdelay_req_states state; + + /** Path Delay Response messages received. */ + u32_t rcvd_pdelay_resp; + + /** Path Delay Follow Up messages received. */ + u32_t rcvd_pdelay_follow_up; + + /** Number of lost Path Delay Responses. */ + u16_t lost_responses; + + /** Timer expired, a new Path Delay Request needs to be sent. */ + bool pdelay_timer_expired; + + /** NeighborRateRatio has been computed successfully. */ + bool neighbor_rate_ratio_valid; + + /** Path Delay has already been computed after initialization. */ + bool init_pdelay_compute; + + /** Count consecutive Pdelay_req with multiple responses. */ + u8_t multiple_resp_count; +}; + +/** + * @brief Pdelay Response state machine variables. + */ +struct gptp_pdelay_resp_state { + /** Current state of the state machine. */ + enum gptp_pdelay_resp_states state; +}; + +/* Sync Receive state machine variables. */ +struct gptp_sync_rcv_state { + /** Time at which a Sync Message without Follow Up will be discarded. */ + u64_t follow_up_receipt_timeout; + + /** Timer for the Follow Up discard. */ + struct k_timer follow_up_discard_timer; + + /** Pointer to the received Sync message. */ + struct net_pkt *rcvd_sync_ptr; + + /** Pointer to the received Follow Up message. */ + struct net_pkt *rcvd_follow_up_ptr; + + /** Current state of the state machine. */ + enum gptp_sync_rcv_states state; + + /** A Sync Message has been received. */ + bool rcvd_sync; + + /** A Follow Up Message has been received. */ + bool rcvd_follow_up; + + /** A Follow Up Message has been received. 
*/ + bool follow_up_timeout_expired; +}; + +/* Sync Send state machine variables. */ +struct gptp_sync_send_state { + /** Pointer to the received MDSyncSend structure. */ + struct gptp_md_sync_info *sync_send_ptr; + + /** Pointer to the sync message to be sent. */ + struct net_pkt *sync_ptr; + + /** Current state of the state machine. */ + enum gptp_sync_send_states state; + + /** A MDSyncSend structure has been received. */ + bool rcvd_md_sync; + + /** The timestamp for the sync message has been received. */ + bool md_sync_timestamp_avail; +}; + +/* Port Sync Sync Receive state machine variables. */ +struct gptp_pss_rcv_state { + /** Sync receive provided by the MD Sync Receive State Machine. */ + struct gptp_md_sync_info sync_rcv; + + /** PortSyncSync structure to be transmitted to the Site Sync Sync. */ + struct gptp_mi_port_sync_sync pss; + + /** SyncReceiptTimeoutTimer for PortAnnounce state machines. */ + struct k_timer sync_receipt_timeout_timer; + + /** Ratio of the Grand Master frequency with the Local Clock. */ + double rate_ratio; + + /** Current state of the state machine. */ + enum gptp_pss_rcv_states state; + + /** A MDSyncReceive structure is ready to be processed. */ + bool rcvd_md_sync; + + /** Expiry of SyncReceiptTimeoutTimer. */ + bool sync_receipt_timeout_timer_expired; +}; + +/* Port Sync Sync Send state machine variables. */ +struct gptp_pss_send_state { + /** Sync send to be transmitted to the MD Sync Send State Machine. */ + struct gptp_md_sync_info sync_send; + + /** Source Port Identity of the last received PortSyncSync. */ + struct gptp_port_identity last_src_port_id; + + /** Precise Origin Timestamp of the last received PortSyncSync. */ + struct net_ptp_time last_precise_orig_ts; + + /** Half Sync Interval Timer. */ + struct k_timer half_sync_itv_timer; + + /** syncReceiptTimeout Timer. */ + struct k_timer sync_receipt_timeout_timer; + + /** GM Phase Change of the last received PortSyncSync. 
*/ + struct gptp_scaled_ns last_gm_phase_change; + + /** Follow Up Correction Field of the last received PortSyncSync. */ + s64_t last_follow_up_correction_field; + + /** Upstream Tx Time of the last received PortSyncSync. */ + u64_t last_upstream_tx_time; + + /** Sync Receipt Timeout Time of the last received PortSyncSync. */ + u64_t last_sync_receipt_timeout_time; + + /** PortSyncSync structure received from the SiteSyncSync. */ + struct gptp_mi_port_sync_sync *pss_sync_ptr; + + /** Rate Ratio of the last received PortSyncSync. */ + double last_rate_ratio; + + /** GM Freq Change of the last received PortSyncSync. */ + double last_gm_freq_change; + + /** Current state of the state machine. */ + enum gptp_pss_send_states state; + + /** GM Time Base Indicator of the last received PortSyncSync. */ + u16_t last_gm_time_base_indicator; + + /** Received Port Number of the last received PortSyncSync. */ + u16_t last_rcvd_port_num; + + /** A PortSyncSync structure is ready to be processed. */ + bool rcvd_pss_sync; + + /** Flag when the half_sync_itv_timer has expired. */ + bool half_sync_itv_timer_expired; + + /** Flag when the half_sync_itv_timer has expired twice. */ + bool sync_itv_timer_expired; + + /** Flag when the syncReceiptTimeoutTime has expired. */ + bool sync_receipt_timeout_timer_expired; +}; + +/* Site Sync Sync state machine variables. */ +struct gptp_site_sync_sync_state { + /** PortSyncSync structure to be sent to other ports and to the Slave. + */ + struct gptp_mi_port_sync_sync pss_send; + + /** Pointer to the PortSyncSync structure received. */ + struct gptp_mi_port_sync_sync *pss_rcv_ptr; + + /** Current state of the state machine. */ + enum gptp_site_sync_sync_states state; + + /** A PortSyncSync structure is ready to be processed. */ + bool rcvd_pss; +}; + +/* Clock Slave Sync state machine variables. */ +struct gptp_clk_slave_sync_state { + /** Pointer to the PortSyncSync structure received. 
*/ + struct gptp_mi_port_sync_sync *pss_rcv_ptr; + + /** Current state of the state machine. */ + enum gptp_clk_slave_sync_states state; + + /** A PortSyncSync structure is ready to be processed. */ + bool rcvd_pss; + + /** The local clock has expired. */ + bool rcvd_local_clk_tick; +}; + +/* Clock Master Sync state machine variables. */ +struct gptp_clk_master_sync_state { + /** Current state of the state machine */ + enum gptp_cms_rcv_states state; + + /** A ClockSourceTime.invoke function is received from the + * Clock source entity + */ + bool rcvd_clock_source_req; + + /** The local clock has expired */ + bool rcvd_local_clock_tick; +}; + +/* Port Announce Receive state machine variables. */ +struct gptp_port_announce_receive_state { + /** Current state of the state machine. */ + enum gptp_pa_rcv_states state; + + /** An announce message is ready to be processed. */ + bool rcvd_announce; +}; + +struct gptp_port_announce_information_state { + /** Timer for the announce expiry. */ + struct k_timer ann_rcpt_expiry_timer; + + /** PortRoleInformation state machine variables. */ + enum gptp_pa_info_states state; + + /* Expired announce information. */ + bool ann_expired; +}; + +/* Port Role Selection state machine variables. */ +struct gptp_port_role_selection_state { + enum gptp_pr_selection_states state; +}; + +/** + * @brief Port Announce Transmit state machine variables. + */ +struct gptp_port_announce_transmit_state { + /** Timer for the announce expiry. */ + struct k_timer ann_send_periodic_timer; + + /** PortRoleTransmit state machine variables. */ + enum gptp_pa_transmit_states state; + + /** Trigger announce information. */ + bool ann_trigger; +}; + +/** + * @brief Structure maintaining per Time-Aware States. + */ +struct gptp_states { + /** SiteSyncSync state machine variables. */ + struct gptp_site_sync_sync_state site_ss; + + /** ClockSlaveSync state machine variables. 
 */ + struct gptp_clk_slave_sync_state clk_slave_sync; + + /** PortRoleSelection state machine variables. */ + struct gptp_port_role_selection_state pr_sel; + + /** ClockMasterSyncReceive state machine variables. */ + struct gptp_clk_master_sync_state clk_master_sync_receive; +}; + +/** + * @brief Structure maintaining per Port States. + */ +struct gptp_port_states { + /** Path Delay Request state machine variables. */ + struct gptp_pdelay_req_state pdelay_req; + + /** Path Delay Response state machine variables. */ + struct gptp_pdelay_resp_state pdelay_resp; + + /** Sync Receive state machine variables. */ + struct gptp_sync_rcv_state sync_rcv; + + /** Sync Send state machine variables. */ + struct gptp_sync_send_state sync_send; + + /** PortSyncSync Receive state machine variables. */ + struct gptp_pss_rcv_state pss_rcv; + + /** PortSyncSync Send state machine variables. */ + struct gptp_pss_send_state pss_send; + + /** PortAnnounce Receive state machine variables. */ + struct gptp_port_announce_receive_state pa_rcv; + + /** PortAnnounce Information state machine variables. */ + struct gptp_port_announce_information_state pa_info; + + /** PortAnnounce Transmit state machine variables. */ + struct gptp_port_announce_transmit_state pa_transmit; +}; + +/** + * @brief Structure maintaining per port BMCA state machines variables. + */ +struct gptp_port_bmca_data { + /** Pointer to announce message. */ + struct net_pkt *rcvd_announce_ptr; + + /** A qualified announce message has been received. */ + bool rcvd_msg; + + /** Origin and state of the port's spanning tree information. */ + enum gptp_info_is info_is; + + /** The masterPriorityVector for the port. */ + struct gptp_priority_vector master_priority; + + /** Indicate if PortAnnounceInformation should copy the newly determined + * master_priority and master_steps_removed. + */ + bool updt_info; + + /** Cause a port to transmit Announce Information. */ + bool new_info; + + /** Announce interval. 
*/ + struct gptp_uscaled_ns announce_interval; + + /** The portPriorityVector for the port. */ + struct gptp_priority_vector port_priority; + + /** The value of steps removed for the port. */ + u16_t port_steps_removed; + + /** The value of steps removed for the port. */ + u16_t message_steps_removed; + + /** Last announce message flags. */ + struct gptp_flags ann_flags; + + /** Last announce message current UTC offset value. */ + s16_t ann_current_utc_offset; + + /** Last announce message time source. */ + enum gptp_time_source ann_time_source; + + /** Announce receipt timeout time interval. */ + struct gptp_uscaled_ns ann_rcpt_timeout_time_interval; +}; + +#endif /* CONFIG_NET_GPTP */ + +#ifdef __cplusplus +} +#endif + +#endif /* __GPTP_STATE_H */ diff --git a/include/net/net_if.h b/include/net/net_if.h index 06ce88cf95c49..00a54e8a5ac6d 100644 --- a/include/net/net_if.h +++ b/include/net/net_if.h @@ -27,6 +27,7 @@ #include #include #include +#include #if defined(CONFIG_NET_DHCPV4) #include @@ -358,7 +359,6 @@ struct net_if_dev { */ struct net_offload *offload; #endif /* CONFIG_NET_OFFLOAD */ - }; /** @@ -372,6 +372,11 @@ struct net_if { /** The net_if_dev instance the net_if is related to */ struct net_if_dev *if_dev; +#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE) + /** Network statistics related to this network interface */ + struct net_stats stats; +#endif /* CONFIG_NET_STATISTICS_PER_INTERFACE */ + /** Network interface instance configuration */ struct net_if_config config; } __net_if_align; @@ -1363,6 +1368,17 @@ struct net_if_router *net_if_ipv4_router_add(struct net_if *iface, bool net_if_ipv4_addr_mask_cmp(struct net_if *iface, struct in_addr *addr); +/** + * @brief Get a network interface that should be used when sending + * IPv4 network data to destination. + * + * @param dst IPv4 destination address + * + * @return Pointer to network interface to use, NULL if no suitable interface + * could be found. 
+ */ +struct net_if *net_if_ipv4_select_src_iface(struct in_addr *dst); + /** * @brief Set IPv4 netmask for an interface. * @@ -1552,6 +1568,90 @@ static inline bool net_if_is_up(struct net_if *iface) */ int net_if_down(struct net_if *iface); +#if defined(CONFIG_NET_PKT_TIMESTAMP) +/** + * @typedef net_if_timestamp_callback_t + * @brief Define callback that is called after a network packet + * has been timestamped. + * @param "struct net_pkt *pkt" A pointer on a struct net_pkt which has + * been timestamped after being sent. + */ +typedef void (*net_if_timestamp_callback_t)(struct net_pkt *pkt); + +/** + * @brief Timestamp callback handler struct. + * + * Stores the timestamp callback information. Caller must make sure that + * the variable pointed by this is valid during the lifetime of + * registration. Typically this means that the variable cannot be + * allocated from stack. + */ +struct net_if_timestamp_cb { + /** Node information for the slist. */ + sys_snode_t node; + + /** Net interface for which the callback is needed. + * A NULL value means all interfaces. + */ + struct net_if *iface; + + /** Timestamp callback */ + net_if_timestamp_callback_t cb; +}; + +/** + * @brief Register a timestamp callback. + * + * @param timestamp Caller specified handler for the callback. + * @param iface Net interface for which the callback is. NULL for all + * interfaces. + * @param cb Callback to register. + */ +void net_if_register_timestamp_cb(struct net_if_timestamp_cb *timestamp, + struct net_if *iface, + net_if_timestamp_callback_t cb); + +/** + * @brief Unregister a timestamp callback. + * + * @param timestamp Caller specified handler for the callback. + */ +void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *timestamp); + +/** + * @brief Call a timestamp callback function. + * + * @param pkt Network buffer. 
+ */ +void net_if_call_timestamp_cb(struct net_pkt *pkt); + +/* + * @brief Add timestamped TX buffer to be handled + * + * @param pkt Timestamped buffer + */ +void net_if_add_tx_timestamp(struct net_pkt *pkt); + +#if defined(CONFIG_NET_STATISTICS) +/* + * @brief Update Rx packet handling statistics + * + * @param pkt Received network packet + */ +void net_if_update_rx_timestamp_stats(struct net_pkt *pkt); + +/* + * @brief Update Tx packet handling statistics + * + * @param pkt Sent network packet + */ +void net_if_update_tx_timestamp_stats(struct net_pkt *pkt); +#else +#define net_if_update_rx_timestamp_stats(pkt) +#define net_if_update_tx_timestamp_stats(pkt) +#endif /* CONFIG_NET_STATISTICS */ +#endif /* CONFIG_NET_PKT_TIMESTAMP */ + struct net_if_api { void (*init)(struct net_if *iface); int (*send)(struct net_if *iface, struct net_pkt *pkt); @@ -1576,18 +1676,18 @@ struct net_if_api { #define NET_IF_GET(dev_name, sfx) \ ((struct net_if *)&NET_IF_GET_NAME(dev_name, sfx)) -#define NET_IF_INIT(dev_name, sfx, _l2, _mtu) \ - static struct net_if_dev (NET_IF_DEV_GET_NAME(dev_name, sfx)) __used \ - __attribute__((__section__(".net_if_dev.data"))) = { \ - .dev = &(__device_##dev_name), \ +#define NET_IF_INIT(dev_name, sfx, _l2, _mtu, _num_configs) \ + static struct net_if_dev (NET_IF_DEV_GET_NAME(dev_name, sfx)) \ + __used __attribute__((__section__(".net_if_dev.data"))) = { \ + .dev = &(DEVICE_NAME_GET(dev_name)), \ .l2 = &(NET_L2_GET_NAME(_l2)), \ .l2_data = &(NET_L2_GET_DATA(dev_name, sfx)), \ .mtu = _mtu, \ }; \ static struct net_if \ - (NET_IF_GET_NAME(dev_name, sfx))[NET_IF_MAX_CONFIGS] __used \ + (NET_IF_GET_NAME(dev_name, sfx))[_num_configs] __used \ __attribute__((__section__(".net_if.data"))) = { \ - [0 ... (NET_IF_MAX_CONFIGS - 1)] = { \ + [0 ... 
(_num_configs - 1)] = { \ .if_dev = &(NET_IF_DEV_GET_NAME(dev_name, sfx)), \ NET_IF_CONFIG_INIT \ } \ @@ -1601,7 +1701,7 @@ struct net_if_api { DEVICE_AND_API_INIT(dev_name, drv_name, init_fn, data, \ cfg_info, POST_KERNEL, prio, api); \ NET_L2_DATA_INIT(dev_name, 0, l2_ctx_type); \ - NET_IF_INIT(dev_name, 0, l2, mtu) + NET_IF_INIT(dev_name, 0, l2, mtu, NET_IF_MAX_CONFIGS) /** * If your network device needs more than one instance of a network interface, @@ -1614,7 +1714,7 @@ struct net_if_api { DEVICE_AND_API_INIT(dev_name, drv_name, init_fn, data, \ cfg_info, POST_KERNEL, prio, api); \ NET_L2_DATA_INIT(dev_name, instance, l2_ctx_type); \ - NET_IF_INIT(dev_name, instance, l2, mtu) + NET_IF_INIT(dev_name, instance, l2, mtu, NET_IF_MAX_CONFIGS) #ifdef __cplusplus } diff --git a/include/net/net_ip.h b/include/net/net_ip.h index 76d1b20b9dafa..acdfbea36ed3c 100644 --- a/include/net/net_ip.h +++ b/include/net/net_ip.h @@ -32,6 +32,9 @@ extern "C" { #endif +/* Specifying VLAN tag here in order to avoid circular dependencies */ +#define NET_VLAN_TAG_UNSPEC 0x0fff + /** Protocol families */ #define PF_UNSPEC 0 /* Unspecified. */ #define PF_INET 2 /* IP protocol family. */ @@ -58,8 +61,10 @@ enum net_sock_type { #define ntohs(x) sys_be16_to_cpu(x) #define ntohl(x) sys_be32_to_cpu(x) +#define ntohll(x) sys_be64_to_cpu(x) #define htons(x) sys_cpu_to_be16(x) #define htonl(x) sys_cpu_to_be32(x) +#define htonll(x) sys_cpu_to_be64(x) /** IPv6 address structure */ struct in6_addr { @@ -978,6 +983,20 @@ int net_tx_priority2tc(enum net_priority prio); */ int net_rx_priority2tc(enum net_priority prio); +/** + * @brief Convert network packet VLAN priority to network packet priority so we + * can place the packet into correct queue. 
+ * + * @param priority VLAN priority + * + * @return Network priority + */ +static inline enum net_priority net_vlan2priority(u8_t priority) +{ + /* Currently this is 1:1 mapping */ + return priority; +} + #ifdef __cplusplus } #endif diff --git a/include/net/net_l2.h b/include/net/net_l2.h index 761109fb5fd5a..bcc0d50f3b107 100644 --- a/include/net/net_l2.h +++ b/include/net/net_l2.h @@ -60,8 +60,6 @@ struct net_l2 { extern const struct net_l2 NET_L2_GET_NAME(_name) #define NET_L2_GET_CTX_TYPE(_name) _name##_CTX_TYPE -extern struct net_l2 __net_l2_start[]; - #ifdef CONFIG_NET_L2_DUMMY #define DUMMY_L2 DUMMY #define DUMMY_L2_CTX_TYPE void* @@ -70,7 +68,6 @@ NET_L2_DECLARE_PUBLIC(DUMMY_L2); #ifdef CONFIG_NET_L2_ETHERNET #define ETHERNET_L2 ETHERNET -#define ETHERNET_L2_CTX_TYPE void* NET_L2_DECLARE_PUBLIC(ETHERNET_L2); #endif /* CONFIG_NET_L2_ETHERNET */ @@ -96,8 +93,6 @@ NET_L2_DECLARE_PUBLIC(OFFLOAD_IP); NET_L2_DECLARE_PUBLIC(OPENTHREAD_L2); #endif /* CONFIG_NET_L2_OPENTHREAD */ -extern struct net_l2 __net_l2_end[]; - #define NET_L2_INIT(_name, _recv_fn, _send_fn, _reserve_fn, _enable_fn) \ const struct net_l2 (NET_L2_GET_NAME(_name)) __used \ __attribute__((__section__(".net_l2.init"))) = { \ diff --git a/include/net/net_pkt.h b/include/net/net_pkt.h index dbeb56aa7caab..591b71152674f 100644 --- a/include/net/net_pkt.h +++ b/include/net/net_pkt.h @@ -26,6 +26,8 @@ #include #include #include +#include +#include #ifdef __cplusplus extern "C" { @@ -73,6 +75,19 @@ struct net_pkt { struct net_if *orig_iface; /* Original network interface */ #endif +#if defined(CONFIG_NET_PKT_TIMESTAMP) + /** Timestamp if available. */ + struct net_ptp_time timestamp; +#if defined(CONFIG_NET_STATISTICS) + /** This is used for collecting statistics. This is updated by + * the driver so it is not fully accurate. This is done using hw cycles + * as we do not have an API that would return time in nanoseconds. 
+ */ + u32_t cycles_create; + u32_t cycles_update; +#endif +#endif + u8_t *appdata; /* application data starts here */ u8_t *next_hdr; /* where is the next header */ @@ -148,7 +163,19 @@ struct net_pkt { * is not prioritised. */ u8_t priority; + + /** Traffic class value. */ + u8_t traffic_class; #endif + +#if defined(CONFIG_NET_VLAN) + /* VLAN TCI (Tag Control Information). This contains the Priority + * Code Point (PCP), Drop Eligible Indicator (DEI) and VLAN + * Identifier (VID, called more commonly VLAN tag). This value is + * kept in host byte order. + */ + u16_t vlan_tci; +#endif /* CONFIG_NET_VLAN */ /* @endcond */ /** Reference counter */ @@ -409,20 +436,149 @@ static inline void net_pkt_set_priority(struct net_pkt *pkt, { pkt->priority = priority; } + +static inline u8_t net_pkt_traffic_class(struct net_pkt *pkt) +{ + return pkt->traffic_class; +} + +static inline void net_pkt_set_traffic_class(struct net_pkt *pkt, u8_t tc) +{ + pkt->traffic_class = tc; +} #else static inline u8_t net_pkt_priority(struct net_pkt *pkt) { return 0; } -static inline void net_pkt_set_priority(struct net_pkt *pkt, - u8_t priority) +static inline u8_t net_pkt_traffic_class(struct net_pkt *pkt) { ARG_UNUSED(pkt); - ARG_UNUSED(priority); + return 0; +} + +static inline void net_pkt_set_traffic_class(struct net_pkt *pkt, u8_t tc) +{ + ARG_UNUSED(pkt); + ARG_UNUSED(tc); } #endif +#if defined(CONFIG_NET_VLAN) +static inline u16_t net_pkt_vlan_tag(struct net_pkt *pkt) +{ + return net_eth_get_vid(pkt->vlan_tci); +} + +static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, u16_t tag) +{ + pkt->vlan_tci = net_eth_set_vid(pkt->vlan_tci, tag); +} + +static inline u8_t net_pkt_vlan_priority(struct net_pkt *pkt) +{ + return net_eth_get_pcp(pkt->vlan_tci); +} + +static inline void net_pkt_set_vlan_priority(struct net_pkt *pkt, + u8_t priority) +{ + pkt->vlan_tci = net_eth_set_pcp(pkt->vlan_tci, priority); +} + +static inline bool net_pkt_vlan_dei(struct net_pkt *pkt) +{ + return 
net_eth_get_dei(pkt->vlan_tci); +} + +static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei) +{ + pkt->vlan_tci = net_eth_set_dei(pkt->vlan_tci, dei); +} + +static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, u16_t tci) +{ + pkt->vlan_tci = tci; +} + +static inline u16_t net_pkt_vlan_tci(struct net_pkt *pkt) +{ + return pkt->vlan_tci; +} +#else +static inline u16_t net_pkt_vlan_tag(struct net_pkt *pkt) +{ + return NET_VLAN_TAG_UNSPEC; +} + +static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, u16_t tag) +{ + ARG_UNUSED(pkt); + ARG_UNUSED(tag); +} + +static inline u8_t net_pkt_vlan_priority(struct net_pkt *pkt) +{ + ARG_UNUSED(pkt); + return 0; +} + +static inline bool net_pkt_vlan_dei(struct net_pkt *pkt) +{ + return false; +} + +static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei) +{ + ARG_UNUSED(pkt); + ARG_UNUSED(dei); +} + +static inline u16_t net_pkt_vlan_tci(struct net_pkt *pkt) +{ + return NET_VLAN_TAG_UNSPEC; /* assumes priority is 0 */ +} + +static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, u16_t tci) +{ + ARG_UNUSED(pkt); + ARG_UNUSED(tci); +} +#endif + +#if defined(CONFIG_NET_PKT_TIMESTAMP) +static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt) +{ + return &pkt->timestamp; +} + +static inline void net_pkt_set_timestamp(struct net_pkt *pkt, + struct net_ptp_time *timestamp) +{ + pkt->timestamp.second = timestamp->second; + pkt->timestamp.nanosecond = timestamp->nanosecond; + +#if defined(CONFIG_NET_STATISTICS) + pkt->cycles_update = k_cycle_get_32(); +#endif +} +#else +static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt) +{ + ARG_UNUSED(pkt); + + return NULL; +} + +static inline void net_pkt_set_timestamp(struct net_pkt *pkt, + struct net_ptp_time *timestamp) +{ + ARG_UNUSED(pkt); + ARG_UNUSED(timestamp); +} +#endif /* CONFIG_NET_PKT_TIMESTAMP */ + static inline size_t net_pkt_get_len(struct net_pkt *pkt) { return net_buf_frags_len(pkt->frags); diff 
--git a/include/net/net_stats.h b/include/net/net_stats.h index 341be1e329869..1f5c6ac0b6673 100644 --- a/include/net/net_stats.h +++ b/include/net/net_stats.h @@ -31,8 +31,13 @@ extern "C" { typedef u32_t net_stats_t; struct net_stats_bytes { - u32_t sent; - u32_t received; + net_stats_t sent; + net_stats_t received; +}; + +struct net_stats_pkts { + net_stats_t tx; + net_stats_t rx; }; struct net_stats_ip { @@ -245,6 +250,27 @@ struct net_stats_tc { } recv[NET_TC_RX_COUNT]; }; +struct net_stats_ts_data { + /** Processing time in nanoseconds */ + u32_t low; + u32_t average; + u32_t high; +}; + +struct net_stats_ts { + /** Network packet timestamping statistics. This tells how many + * nanoseconds it took for packet to transmit or receive. This + * is only calculated for those packets that have TX time-stamping + * enabled. + */ + struct { + struct net_stats_ts_data time; + } tx[NET_TC_TX_COUNT]; + + struct { + struct net_stats_ts_data time; + } rx[NET_TC_RX_COUNT]; +}; struct net_stats { net_stats_t processing_error; @@ -292,6 +318,69 @@ struct net_stats { #if NET_TC_COUNT > 1 struct net_stats_tc tc; #endif + +#if defined(CONFIG_NET_PKT_TIMESTAMP) + struct net_stats_ts ts; +#endif +}; + +struct net_stats_eth_errors { + net_stats_t rx_length_errors; + net_stats_t rx_over_errors; + net_stats_t rx_crc_errors; + net_stats_t rx_frame_errors; + net_stats_t rx_no_buffer_count; + net_stats_t rx_missed_errors; + net_stats_t rx_long_length_errors; + net_stats_t rx_short_length_errors; + net_stats_t rx_align_errors; + net_stats_t rx_dma_failed; + net_stats_t rx_buf_alloc_failed; + + net_stats_t tx_aborted_errors; + net_stats_t tx_carrier_errors; + net_stats_t tx_fifo_errors; + net_stats_t tx_heartbeat_errors; + net_stats_t tx_window_errors; + net_stats_t tx_dma_failed; + + net_stats_t uncorr_ecc_errors; + net_stats_t corr_ecc_errors; +}; + +struct net_stats_eth_flow { + net_stats_t rx_flow_control_xon; + net_stats_t rx_flow_control_xoff; + net_stats_t tx_flow_control_xon; + 
net_stats_t tx_flow_control_xoff; +}; + +struct net_stats_eth_csum { + net_stats_t rx_csum_offload_good; + net_stats_t rx_csum_offload_errors; +}; + +struct net_stats_eth_hw_timestamp { + net_stats_t rx_hwtstamp_cleared; + net_stats_t tx_hwtstamp_timeouts; + net_stats_t tx_hwtstamp_skipped; +}; + +/* Ethernet specific statistics */ +struct net_stats_eth { + struct net_stats_bytes bytes; + struct net_stats_pkts pkts; + struct net_stats_pkts broadcast; + struct net_stats_pkts multicast; + struct net_stats_pkts errors; + struct net_stats_eth_errors error_details; + struct net_stats_eth_flow flow_control; + struct net_stats_eth_csum csum; + struct net_stats_eth_hw_timestamp hw_timestamp; + net_stats_t collisions; + net_stats_t tx_dropped; + net_stats_t tx_timeout_count; + net_stats_t tx_restart_queue; }; #if defined(CONFIG_NET_STATISTICS_USER_API) @@ -316,27 +405,28 @@ enum net_request_stats_cmd { NET_REQUEST_STATS_CMD_GET_UDP, NET_REQUEST_STATS_CMD_GET_TCP, NET_REQUEST_STATS_CMD_GET_RPL, + NET_REQUEST_STATS_CMD_GET_ETHERNET, }; #define NET_REQUEST_STATS_GET_ALL \ (_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_ALL) -//NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_ALL); +NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_ALL); #define NET_REQUEST_STATS_GET_PROCESSING_ERROR \ (_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_PROCESSING_ERROR) -//NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_PROCESSING_ERROR); +NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_PROCESSING_ERROR); #define NET_REQUEST_STATS_GET_BYTES \ (_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_BYTES) -//NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_BYTES); +NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_BYTES); #define NET_REQUEST_STATS_GET_IP_ERRORS \ (_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_IP_ERRORS) -//NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_IP_ERRORS); +NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_IP_ERRORS); #if 
 defined(CONFIG_NET_STATISTICS_IPV4) #define NET_REQUEST_STATS_GET_IPV4 \ @@ -387,6 +477,12 @@ NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_TCP); NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_RPL); #endif /* CONFIG_NET_STATISTICS_RPL */ +#if defined(CONFIG_NET_STATISTICS_ETHERNET) +#define NET_REQUEST_STATS_GET_ETHERNET \ + (_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_ETHERNET) + +NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_ETHERNET); +#endif /* CONFIG_NET_STATISTICS_ETHERNET */ #endif /* CONFIG_NET_STATISTICS_USER_API */ /** diff --git a/include/net/vlan.h b/include/net/vlan.h new file mode 100644 index 0000000000000..33edd1af2fedb --- /dev/null +++ b/include/net/vlan.h @@ -0,0 +1,76 @@ +/** @file + * @brief VLAN specific definitions. + * + * Virtual LAN specific definitions. + */ + +/* + * Copyright (c) 2018 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef __VLAN_H +#define __VLAN_H + +/** + * @brief VLAN definitions and helpers + * @defgroup vlan Virtual LAN definitions and helpers + * @ingroup networking + * @{ + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define NET_VLAN_TAG_UNSPEC 0x0fff + +/* Get VLAN identifier from TCI */ +static inline u16_t net_eth_get_vid(u16_t tci) +{ + return tci & 0x0fff; +} + +/* Get Drop Eligible Indicator from TCI */ +static inline u8_t net_eth_get_dei(u16_t tci) +{ + return (tci >> 12) & 0x01; +} + +/* Get Priority Code Point from TCI */ +static inline u8_t net_eth_get_pcp(u16_t tci) +{ + return (tci >> 13) & 0x07; +} + +/* Set VLAN identifier to TCI */ +static inline u16_t net_eth_set_vid(u16_t tci, u16_t vid) +{ + return (tci & 0xf000) | (vid & 0x0fff); +} + +/* Set Drop Eligible Indicator to TCI */ +static inline u16_t net_eth_set_dei(u16_t tci, bool dei) +{ + return (tci & 0xefff) | ((!!dei) << 12); +} + +/* Set Priority Code Point to TCI */ +static inline u16_t net_eth_set_pcp(u16_t tci, u8_t pcp) +{ + return (tci & 0x1fff) | ((pcp & 0x07) << 13); +} 
+#ifdef __cplusplus +} +#endif + +/** + * @} + */ + + +#endif /* __VLAN_H */ diff --git a/include/ptp_clock.h b/include/ptp_clock.h new file mode 100644 index 0000000000000..5d68e0dd12c65 --- /dev/null +++ b/include/ptp_clock.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _PTP_CLOCK_H_ +#define _PTP_CLOCK_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Special alignment is needed for ptp_clock which is stored in + * a ptp_clock linker section if there are more than one ptp clock + * in the system. If there is only one ptp clock, + * then this alignment is not needed, unfortunately this cannot be + * known beforehand. + * + * The ptp_clock struct needs to be aligned to 32 byte boundary, + * otherwise the __ptp_clock_end will point to wrong location and ptp_clock + * initialization done in ptp_clock_init() will not find proper values + * for the second interface. + * + * So this alignment is a workaround and should eventually be removed. + */ +#define __ptp_clock_align __aligned(32) + +struct ptp_clock; + +struct ptp_clock_driver_api { + int (*set)(struct ptp_clock *clk, struct net_ptp_time *tm); + int (*get)(struct ptp_clock *clk, struct net_ptp_time *tm); + int (*adjust)(struct ptp_clock *clk, int increment); + int (*rate_adjust)(struct ptp_clock *clk, float ratio); +}; + +/** + * @brief PTP Clock structure + * + * Used to handle a ptp clock on top of a device driver instance. + * There can be many ptp_clock instance against the same device. + */ +struct ptp_clock { + /** The actually device driver instance the ptp_clock is related to */ + struct device *dev; + + /** API for the ptp clock. 
*/ + struct ptp_clock_driver_api const *api; +} __ptp_clock_align; + +#define PTP_CLOCK_GET_NAME(dev_name, sfx) (__ptp_clock_##dev_name##_##sfx) +#define PTP_CLOCK_GET(dev_name, sfx) \ + ((struct ptp_clock *)&PTP_CLOCK_GET_NAME(dev_name, sfx)) + +#define PTP_CLOCK_INIT(dev_name, sfx, api) \ + static const struct ptp_clock \ + (PTP_CLOCK_GET_NAME(dev_name, sfx)) __used \ + __attribute__((__section__(".ptp_clock.data"))) = { \ + .dev = &(DEVICE_NAME_GET(dev_name)), \ + .api = &(api), \ + } \ + +/* PTP clock initialization macros */ + +#define PTP_CLOCK_DEVICE_INIT(dev_name, api) \ + PTP_CLOCK_INIT(dev_name, 0, api) + +#define PTP_CLOCK_INIT_INSTANCE(dev_name, instance, api) \ + PTP_CLOCK_INIT(dev_name, instance, api) + +static inline void ptp_clock_set(struct ptp_clock *clk, struct net_ptp_time *tm) +{ + const struct ptp_clock_driver_api *api = clk->api; + + api->set(clk, tm); +} + +static inline void ptp_clock_get(struct ptp_clock *clk, struct net_ptp_time *tm) +{ + const struct ptp_clock_driver_api *api = clk->api; + + api->get(clk, tm); +} + +static inline void ptp_clock_adjust(struct ptp_clock *clk, int increment) +{ + const struct ptp_clock_driver_api *api = clk->api; + + api->adjust(clk, increment); +} + +static inline void ptp_clock_rate_adjust(struct ptp_clock *clk, float rate) +{ + const struct ptp_clock_driver_api *api = clk->api; + + api->rate_adjust(clk, rate); +} + +struct ptp_clock *ptp_clock_lookup_by_dev(struct device *dev); + +#ifdef __cplusplus +} +#endif + +#endif /* __PTP_CLOCK_H__ */ diff --git a/samples/net/gptp/CMakeLists.txt b/samples/net/gptp/CMakeLists.txt new file mode 100644 index 0000000000000..46ae3253a38cb --- /dev/null +++ b/samples/net/gptp/CMakeLists.txt @@ -0,0 +1,8 @@ +set(KCONFIG_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/Kconfig) + +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(NONE) + +target_sources(app PRIVATE src/main.c) + +include($ENV{ZEPHYR_BASE}/samples/net/common/common.cmake) diff --git 
a/samples/net/gptp/Kconfig b/samples/net/gptp/Kconfig new file mode 100644 index 0000000000000..105e4bf42be20 --- /dev/null +++ b/samples/net/gptp/Kconfig @@ -0,0 +1,64 @@ +# Kconfig - Private config options for gPTP sample app + +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +# This sample application will have three network interfaces. +# gPTP protocol will run in the non-VLAN interface and then there +# will be two VLAN interface for other use. This is just an example +# how to do this kind of setup. See also VLAN sample application +# for vlan-setup-linux.sh script that can be used to setup the +# VLAN IP addressing in Linux side (if that is desired). + +mainmenu "gPTP sample application" + +config ZEPHYR_BASE + string + option env="ZEPHYR_BASE" + +source "$ZEPHYR_BASE/Kconfig.zephyr" + +if NET_GPTP + +config NET_SAMPLE_IFACE2_MY_IPV6_ADDR + string "My IPv6 address for second interface" + help + The value depends on your network setup. + +config NET_SAMPLE_IFACE2_MY_IPV4_ADDR + string "My IPv4 address for second interface" + help + The value depends on your network setup. + +config NET_SAMPLE_IFACE2_VLAN_TAG + int "VLAN tag for second interface" + default 100 + range 0 4094 + depends on NET_VLAN + help + Set VLAN (virtual LAN) tag (id) that is used in the sample + application. + +config NET_SAMPLE_IFACE3_MY_IPV6_ADDR + string "My IPv6 address for third interface" + help + The value depends on your network setup. + +config NET_SAMPLE_IFACE3_MY_IPV4_ADDR + string "My IPv4 address for third interface" + help + The value depends on your network setup. + +config NET_SAMPLE_IFACE3_VLAN_TAG + int "VLAN tag for third interface" + default 200 + range 0 4094 + depends on NET_VLAN + help + Set VLAN (virtual LAN) tag (id) that is used in the sample + application. 
+ +endif diff --git a/samples/net/gptp/README.rst b/samples/net/gptp/README.rst new file mode 100644 index 0000000000000..c8b849bab1099 --- /dev/null +++ b/samples/net/gptp/README.rst @@ -0,0 +1,110 @@ +.. _gptp-sample: + +gPTP Sample Application +####################### + +Overview +******** + +The gPTP sample application for Zephyr will enable gPTP support, registers +gPTP phase discontinuity callback, enable traffic class support (TX multi +queues) and setup VLANs (if enabled). The net-shell is also enabled so that +user can monitor gPTP functionality. + +The source code for this sample application can be found at: +:file:`samples/net/gptp`. + +Requirements +************ + +- :ref:`networking_with_qemu` + +Building and Running +******************** + +A good way to run this sample is to run this gPTP application inside QEMU +as described in :ref:`networking_with_qemu` or with embedded device like +FRDM-K64F. Note that gPTP is only supported for boards that have ethernet port +and which has support for collecting timestamps for sent and received +ethernet frames. + +Follow these steps to build the gPTP sample application: + +.. zephyr-app-commands:: + :zephyr-app: samples/net/gptp + :board: + :conf: prj.conf + :goals: build + :compact: + +The net-shell command "net gptp" will print out general gPTP information. +For port 1, the command "net gptp 1" will print detailed information about +port 1 statistics etc. Note that executing the shell command could affect +the timing of the gPTP packets and the grandmaster might mark the device +as non AS capable and disable it. + +Setting up Linux Host +===================== + +If you need VLAN support in your network, then the +:file:`samples/net/vlan/vlan-setup-linux.sh` provides a script that can be +executed on the Linux host. It creates two VLANs on the Linux host and creates +routes to Zephyr. 
+ +The OpenAVNU repository at https://github.com/AVnu/OpenAvnu contains gPTP +daemon that can be run in Linux host and which can act as a grandmaster for +the IEEE 802.1AS network. + +After downloading the source code, compile it like this in Linux: + +.. code-block:: console + + mkdir build + cd build + cmake .. + cp daemons/gptp/gptp_cfg.ini build/daemons/gptp/ + cd build/daemons/gptp + +Edit the gptp_cfg.ini file and set the neighborPropDelayThresh to 10000 +as the default value 800 is too low if you run the gPTP in FRDM-K64F. + +Then execute the daemon with correct network interface and the configuration +file. + +.. code-block:: console + + sudo ./gptp enp0s25 -F gptp_cfg.ini + +Note that here the example network interface enp0s25 is the name of the +non-VLAN network interface that is connected to your Zephyr device. + +If everything is configured correctly, you should see following kind of +messages from gptp: + +.. code-block:: console + + INFO : GPTP [13:01:14:837] gPTP starting + INFO : GPTP [13:01:14:838] priority1 = 248 + INFO : GPTP [13:01:14:838] announceReceiptTimeout: 3 + INFO : GPTP [13:01:14:838] syncReceiptTimeout: 3 + INFO : GPTP [13:01:14:838] LINKSPEED_100MB - PHY delay + TX: 1044 | RX: 2133 + INFO : GPTP [13:01:14:838] LINKSPEED_1G - PHY delay + TX: 184 | RX: 382 + INFO : GPTP [13:01:14:838] neighborPropDelayThresh: 10000 + INFO : GPTP [13:01:14:838] syncReceiptThreshold: 8 + ERROR : GPTP [13:01:14:838] Using clock device: /dev/ptp0 + STATUS : GPTP [13:01:14:838] Starting PDelay + STATUS : GPTP [13:01:14:838] Link Speed: 1000000 kb/sec + STATUS : GPTP [13:01:14:871] AsCapable: Enabled + STATUS : GPTP [13:01:16:497] New Grandmaster "3C:97:0E:FF:FE:23:F2:32" (previous "00:00:00:00:00:00:00:00") + STATUS : GPTP [13:01:16:497] Switching to Master + +If Zephyr syncs properly with gptp daemon, then this is printed: + +..
code-block:: console + + STATUS : GPTP [13:01:25:965] AsCapable: Enabled + +By default gPTP in Zephyr will not print any gPTP debug messages to console. +One can enable debug prints by setting CONFIG_NET_DEBUG_GPTP=y in config file. diff --git a/samples/net/gptp/prj.conf b/samples/net/gptp/prj.conf new file mode 100644 index 0000000000000..87f5a630b5994 --- /dev/null +++ b/samples/net/gptp/prj.conf @@ -0,0 +1,94 @@ +CONFIG_NETWORKING=y +CONFIG_NET_LOG=y +CONFIG_NET_IPV6=y +CONFIG_NET_IPV4=y +CONFIG_NET_DHCPV4=n +CONFIG_NET_UDP=y +CONFIG_NET_TCP=y +CONFIG_NET_STATISTICS=y + +CONFIG_TEST_RANDOM_GENERATOR=y + +CONFIG_NET_PKT_RX_COUNT=32 +CONFIG_NET_PKT_TX_COUNT=32 +CONFIG_NET_BUF_RX_COUNT=32 +CONFIG_NET_BUF_TX_COUNT=32 +CONFIG_NET_IF_UNICAST_IPV6_ADDR_COUNT=5 +CONFIG_NET_IF_MCAST_IPV6_ADDR_COUNT=5 +CONFIG_NET_IF_UNICAST_IPV4_ADDR_COUNT=1 +CONFIG_NET_MAX_CONTEXTS=10 + +CONFIG_INIT_STACKS=y +CONFIG_PRINTK=y +CONFIG_NET_SHELL=y + +# Ethernet is needed for gPTP +CONFIG_NET_L2_ETHERNET=y + +CONFIG_NET_APP_SERVER=y +CONFIG_NET_APP_NEED_IPV6=y +CONFIG_NET_APP_NEED_IPV4=y +CONFIG_NET_APP_SETTINGS=y + +# There will be three network interfaces. gPTP will +# run in non-VLAN interface and then there are two extra +# VLAN interface for other use. This is just an example +# how to do this kind of setup. 
+ +# First ethernet interface will use these settings +CONFIG_NET_APP_MY_IPV6_ADDR="2001:db8::1" +CONFIG_NET_APP_PEER_IPV6_ADDR="2001:db8::2" +CONFIG_NET_APP_MY_IPV4_ADDR="192.0.2.1" +CONFIG_NET_APP_PEER_IPV4_ADDR="192.0.2.2" + +# Second ethernet interface will have these settings +CONFIG_NET_SAMPLE_IFACE2_MY_IPV6_ADDR="2001:db8:100::1" +CONFIG_NET_SAMPLE_IFACE2_PEER_IPV6_ADDR="2001:db8:100::2" +# TEST-NET-2 from RFC 5737 +CONFIG_NET_SAMPLE_IFACE2_MY_IPV4_ADDR="198.51.100.1" +CONFIG_NET_SAMPLE_IFACE2_PEER_IPV4_ADDR="198.51.100.2" +# VLAN tag for the second interface +CONFIG_NET_SAMPLE_IFACE2_VLAN_TAG=100 + +# Settings for the third network interface +CONFIG_NET_SAMPLE_IFACE3_MY_IPV6_ADDR="2001:db8:200::1" +CONFIG_NET_SAMPLE_IFACE3_PEER_IPV6_ADDR="2001:db8:200::2" +# TEST-NET-3 from RFC 5737 +CONFIG_NET_SAMPLE_IFACE3_MY_IPV4_ADDR="203.0.113.1" +CONFIG_NET_SAMPLE_IFACE3_PEER_IPV4_ADDR="203.0.113.2" +# VLAN tag for the second interface +CONFIG_NET_SAMPLE_IFACE3_VLAN_TAG=200 + +# Logging +CONFIG_SYS_LOG_SHOW_COLOR=y +CONFIG_SYS_LOG_NET_LEVEL=4 + +CONFIG_NET_DEBUG_NET_PKT=y +CONFIG_NET_DEBUG_L2_ETHERNET=n +CONFIG_NET_DEBUG_ARP=n +CONFIG_NET_DEBUG_CORE=n +CONFIG_NET_DEBUG_IF=n +CONFIG_NET_DEBUG_GPTP=n + +# VLAN settings. 
We will have three VLANs, but the one running gPTP protocol +# will not have any tags (see IEEE 802.11AS chapter 11.3.3 for details) +CONFIG_NET_VLAN=y +CONFIG_NET_VLAN_COUNT=3 + +# gPTP settings +CONFIG_NET_GPTP=y +CONFIG_NET_GPTP_STATISTICS=y + +# How many traffic classes to enable +CONFIG_NET_TC_TX_COUNT=6 +CONFIG_NET_TC_RX_COUNT=4 + +# Enable priority support in net_context +CONFIG_NET_CONTEXT_PRIORITY=y + +# Settings for native_posix ethernet driver (if compiled for that board) +CONFIG_ETH_NATIVE_POSIX=y +CONFIG_SYS_LOG_ETHERNET_LEVEL=1 +CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK=y +#CONFIG_ETH_NATIVE_POSIX_RANDOM_MAC=y +CONFIG_ETH_NATIVE_POSIX_MAC_ADDR="00:00:5e:00:53:2a" diff --git a/samples/net/gptp/prj_frdm_k64f.conf b/samples/net/gptp/prj_frdm_k64f.conf new file mode 100644 index 0000000000000..a3f893b60b624 --- /dev/null +++ b/samples/net/gptp/prj_frdm_k64f.conf @@ -0,0 +1,98 @@ +CONFIG_NETWORKING=y +CONFIG_NET_LOG=y +CONFIG_NET_IPV6=y +CONFIG_NET_IPV4=y +CONFIG_NET_DHCPV4=n +CONFIG_NET_UDP=y +CONFIG_NET_TCP=y +CONFIG_NET_STATISTICS=y + +CONFIG_TEST_RANDOM_GENERATOR=y + +CONFIG_NET_PKT_RX_COUNT=32 +CONFIG_NET_PKT_TX_COUNT=32 +CONFIG_NET_BUF_RX_COUNT=32 +CONFIG_NET_BUF_TX_COUNT=32 +CONFIG_NET_IF_UNICAST_IPV6_ADDR_COUNT=5 +CONFIG_NET_IF_MCAST_IPV6_ADDR_COUNT=5 +CONFIG_NET_IF_UNICAST_IPV4_ADDR_COUNT=1 +CONFIG_NET_MAX_CONTEXTS=10 + +CONFIG_INIT_STACKS=y +CONFIG_PRINTK=y +CONFIG_NET_SHELL=y + +# Ethernet is needed for gPTP +CONFIG_NET_L2_ETHERNET=y + +CONFIG_NET_APP_SERVER=y +CONFIG_NET_APP_NEED_IPV6=y +CONFIG_NET_APP_NEED_IPV4=y +CONFIG_NET_APP_SETTINGS=y + +# There will be three network interfaces. gPTP will +# run in non-VLAN interface and then there are two extra +# VLAN interface for other use. This is just an example +# how to do this kind of setup. 
+ +# First ethernet interface will use these settings +CONFIG_NET_APP_MY_IPV6_ADDR="2001:db8::1" +CONFIG_NET_APP_PEER_IPV6_ADDR="2001:db8::2" +CONFIG_NET_APP_MY_IPV4_ADDR="192.0.2.1" +CONFIG_NET_APP_PEER_IPV4_ADDR="192.0.2.2" + +# Second ethernet interface will have these settings +CONFIG_NET_SAMPLE_IFACE2_MY_IPV6_ADDR="2001:db8:100::1" +CONFIG_NET_SAMPLE_IFACE2_PEER_IPV6_ADDR="2001:db8:100::2" +# TEST-NET-2 from RFC 5737 +CONFIG_NET_SAMPLE_IFACE2_MY_IPV4_ADDR="198.51.100.1" +CONFIG_NET_SAMPLE_IFACE2_PEER_IPV4_ADDR="198.51.100.2" +# VLAN tag for the second interface +CONFIG_NET_SAMPLE_IFACE2_VLAN_TAG=100 + +# Settings for the third network interface +CONFIG_NET_SAMPLE_IFACE3_MY_IPV6_ADDR="2001:db8:200::1" +CONFIG_NET_SAMPLE_IFACE3_PEER_IPV6_ADDR="2001:db8:200::2" +# TEST-NET-3 from RFC 5737 +CONFIG_NET_SAMPLE_IFACE3_MY_IPV4_ADDR="203.0.113.1" +CONFIG_NET_SAMPLE_IFACE3_PEER_IPV4_ADDR="203.0.113.2" +# VLAN tag for the second interface +CONFIG_NET_SAMPLE_IFACE3_VLAN_TAG=200 + +# Logging +CONFIG_SYS_LOG_SHOW_COLOR=y +CONFIG_SYS_LOG_NET_LEVEL=4 + +CONFIG_NET_DEBUG_NET_PKT=y +CONFIG_NET_DEBUG_L2_ETHERNET=n +CONFIG_NET_DEBUG_ARP=n +CONFIG_NET_DEBUG_CORE=n +CONFIG_NET_DEBUG_IF=n +CONFIG_NET_DEBUG_GPTP=n +#CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG=n + +# VLAN settings. 
We will have three VLANs, but the one running gPTP protocol +# will not have any tags (see IEEE 802.11AS chapter 11.3.3 for details) +CONFIG_NET_VLAN=y +CONFIG_NET_VLAN_COUNT=3 + +# gPTP settings +CONFIG_NET_GPTP=y +CONFIG_NET_GPTP_STATISTICS=y + +# MCUX driver settings +CONFIG_ETH_MCUX=y +CONFIG_PTP_CLOCK_MCUX=y + +# Optionally you can use fixed MAC address +CONFIG_ETH_MCUX_0_RANDOM_MAC=n +CONFIG_ETH_MCUX_0_MAC3=0xBC +CONFIG_ETH_MCUX_0_MAC4=0x8C +CONFIG_ETH_MCUX_0_MAC5=0xAE + +# How many traffic classes to enable +CONFIG_NET_TC_TX_COUNT=8 +CONFIG_NET_TC_RX_COUNT=8 + +# Enable priority support in net_context +CONFIG_NET_CONTEXT_PRIORITY=y diff --git a/samples/net/gptp/sample.yaml b/samples/net/gptp/sample.yaml new file mode 100644 index 0000000000000..2918b012f6056 --- /dev/null +++ b/samples/net/gptp/sample.yaml @@ -0,0 +1,10 @@ +common: + harness: net + tags: net gptp +sample: + description: Test gPTP functionality + name: gPTP sample app +tests: + test: + platform_whitelist: qemu_x86 frdm_k64f + depends_on: netif diff --git a/samples/net/gptp/src/main.c b/samples/net/gptp/src/main.c new file mode 100644 index 0000000000000..c3f14b102d2ff --- /dev/null +++ b/samples/net/gptp/src/main.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2018 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if 1 +#define SYS_LOG_DOMAIN "gptp-app" +#define NET_SYS_LOG_LEVEL SYS_LOG_LEVEL_DEBUG +#define NET_LOG_ENABLED 1 +#endif + +#include +#include + +#include +#include +#include +#include +#include + +static struct gptp_phase_dis_cb phase_dis; + +/* Enable following if you want to run gPTP over VLAN with this application */ +#define GPTP_OVER_VLAN 0 +#define GPTP_VLAN_TAG 42 + +#if defined(CONFIG_NET_VLAN) +/* User data for the interface callback */ +struct ud { + struct net_if *first; + struct net_if *second; + struct net_if *third; +}; + +static void iface_cb(struct net_if *iface, void *user_data) +{ + struct ud *ud = user_data; + + if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) { + return; + } + + if (!ud->first) { + ud->first = iface; + return; + } + + if (!ud->second) { + ud->second = iface; + return; + } + + if (!ud->third) { + ud->third = iface; + return; + } +} + +static int setup_iface(struct net_if *iface, const char *ipv6_addr, + const char *ipv4_addr, u16_t vlan_tag) +{ + struct net_if_addr *ifaddr; + struct in_addr addr4; + struct in6_addr addr6; + int ret; + + ret = net_eth_vlan_enable(iface, vlan_tag); + if (ret < 0) { + NET_ERR("Cannot enable VLAN for tag %d (%d)", vlan_tag, ret); + } + + if (net_addr_pton(AF_INET6, ipv6_addr, &addr6)) { + NET_ERR("Invalid address: %s", ipv6_addr); + return -EINVAL; + } + + ifaddr = net_if_ipv6_addr_add(iface, &addr6, NET_ADDR_MANUAL, 0); + if (!ifaddr) { + NET_ERR("Cannot add %s to interface %p", ipv6_addr, iface); + return -EINVAL; + } + + if (net_addr_pton(AF_INET, ipv4_addr, &addr4)) { + NET_ERR("Invalid address: %s", ipv6_addr); + return -EINVAL; + } + + ifaddr = net_if_ipv4_addr_add(iface, &addr4, NET_ADDR_MANUAL, 0); + if (!ifaddr) { + NET_ERR("Cannot add %s to interface %p", ipv4_addr, iface); + return -EINVAL; + } + + NET_DBG("Interface %p VLAN tag %d setup done.", iface, vlan_tag); + + return 0; +} + +static int init_vlan(void) +{ + struct ud ud; + 
int ret; + + memset(&ud, 0, sizeof(ud)); + + net_if_foreach(iface_cb, &ud); + +#if GPTP_OVER_VLAN + ret = net_eth_vlan_enable(ud.first, GPTP_VLAN_TAG); + if (ret < 0) { + NET_ERR("Cannot enable VLAN for tag %d (%d)", GPTP_VLAN_TAG, + ret); + } +#endif + + /* This sample has two VLANs. For the second one we need to manually + * create IP address for this test. But first the VLAN needs to be + * added to the interface so that IPv6 DAD can work properly. + */ + ret = setup_iface(ud.second, + CONFIG_NET_SAMPLE_IFACE2_MY_IPV6_ADDR, + CONFIG_NET_SAMPLE_IFACE2_MY_IPV4_ADDR, + CONFIG_NET_SAMPLE_IFACE2_VLAN_TAG); + if (ret < 0) { + return ret; + } + + ret = setup_iface(ud.third, + CONFIG_NET_SAMPLE_IFACE3_MY_IPV6_ADDR, + CONFIG_NET_SAMPLE_IFACE3_MY_IPV4_ADDR, + CONFIG_NET_SAMPLE_IFACE3_VLAN_TAG); + if (ret < 0) { + return ret; + } + + return 0; +} +#endif /* CONFIG_NET_VLAN */ + +static void gptp_phase_dis_cb(u8_t *gm_identity, + u16_t *time_base, + struct gptp_scaled_ns *last_gm_ph_change, + double *last_gm_freq_change) +{ + char output[sizeof("xx:xx:xx:xx:xx:xx:xx:xx")]; + static u8_t id[8]; + + if (memcmp(id, gm_identity, sizeof(id))) { + memcpy(id, gm_identity, sizeof(id)); + + NET_DBG("GM %s last phase %d.%lld", + gptp_sprint_clock_id(gm_identity, output, + sizeof(output)), + last_gm_ph_change->high, + last_gm_ph_change->low); + } +} + +static int init_app(void) +{ +#if defined(CONFIG_NET_VLAN) + if (init_vlan() < 0) { + NET_ERR("Cannot setup VLAN"); + } +#endif + + gptp_register_phase_dis_cb(&phase_dis, gptp_phase_dis_cb); + + return 0; +} + +void main(void) +{ + init_app(); +} diff --git a/samples/net/stats/CMakeLists.txt b/samples/net/stats/CMakeLists.txt new file mode 100644 index 0000000000000..46ae3253a38cb --- /dev/null +++ b/samples/net/stats/CMakeLists.txt @@ -0,0 +1,8 @@ +set(KCONFIG_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/Kconfig) + +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(NONE) + +target_sources(app PRIVATE src/main.c) + 
+include($ENV{ZEPHYR_BASE}/samples/net/common/common.cmake) diff --git a/samples/net/stats/Kconfig b/samples/net/stats/Kconfig new file mode 100644 index 0000000000000..e98ad08324198 --- /dev/null +++ b/samples/net/stats/Kconfig @@ -0,0 +1,23 @@ +# Kconfig - Private config options for network statistics sample app + +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +mainmenu "Network statistics sample application" + +config ZEPHYR_BASE + string + option env="ZEPHYR_BASE" + +source "$ZEPHYR_BASE/Kconfig.zephyr" + +config SAMPLE_PERIOD + int "How often to print statistics (in seconds)" + default 30 + range 1 3600 + depends on NET_STATISTICS + help + Print statistics after every n. seconds diff --git a/samples/net/stats/README.rst b/samples/net/stats/README.rst new file mode 100644 index 0000000000000..eaf8c2116503a --- /dev/null +++ b/samples/net/stats/README.rst @@ -0,0 +1,58 @@ +.. _net_stats-sample: + +Network Statistics Sample Application +##################################### + +Overview +******** + +This sample application shows how to query network statistics from user +application. + +The source code for this sample application can be found at: +:file:`samples/net/stats`. + +Requirements +************ + +- :ref:`networking_with_qemu` + +Building and Running +******************** + +A good way to run this sample application is with QEMU as described in +:ref:`networking_with_qemu`. + +Follow these steps to build the network statistics sample application: + +.. zephyr-app-commands:: + :zephyr-app: samples/net/stats + :board: + :conf: prj.conf + :goals: build + :compact: + +If everything is configured correctly, the application will periodically print +current network statistics to console. + +.. 
code-block:: console + + Global network statistics + IPv6 recv 27 sent 8 drop 0 forwarded 0 + IPv6 ND recv 2 sent 5 drop 2 + IPv6 MLD recv 0 sent 3 drop 0 + IPv4 recv 20 sent 0 drop 20 forwarded 0 + IP vhlerr 0 hblener 0 lblener 0 + IP fragerr 0 chkerr 0 protoer 0 + ICMP recv 15 sent 3 drop 13 + ICMP typeer 0 chkerr 0 + UDP recv 0 sent 0 drop 30 + UDP chkerr 0 + TCP bytes recv 0 sent 0 + TCP seg recv 0 sent 0 drop 0 + TCP seg resent 0 chkerr 0 ackerr 0 + TCP seg rsterr 0 rst 0 re-xmit 0 + TCP conn drop 0 connrst 0 + Bytes received 7056 + Bytes sent 564 + Processing err 1 diff --git a/samples/net/stats/prj.conf b/samples/net/stats/prj.conf new file mode 100644 index 0000000000000..b6634ee4bafad --- /dev/null +++ b/samples/net/stats/prj.conf @@ -0,0 +1,57 @@ +# Generic network options +CONFIG_NETWORKING=y +CONFIG_NET_LOG=y +CONFIG_NET_IPV6=y +CONFIG_NET_IPV4=y +CONFIG_NET_DHCPV4=n +CONFIG_NET_UDP=y +CONFIG_NET_TCP=y + +# Network statistics options +CONFIG_NET_STATISTICS=y +CONFIG_NET_STATISTICS_USER_API=y +CONFIG_NET_STATISTICS_PER_INTERFACE=y +CONFIG_NET_STATISTICS_ETHERNET=y + +# How often to print current statistics +CONFIG_SAMPLE_PERIOD=30 + +CONFIG_TEST_RANDOM_GENERATOR=y + +# Network packet configuration +CONFIG_NET_PKT_RX_COUNT=32 +CONFIG_NET_PKT_TX_COUNT=32 +CONFIG_NET_BUF_RX_COUNT=32 +CONFIG_NET_BUF_TX_COUNT=32 + +# IP address configuration +CONFIG_NET_IF_UNICAST_IPV6_ADDR_COUNT=5 +CONFIG_NET_IF_MCAST_IPV6_ADDR_COUNT=5 +CONFIG_NET_IF_UNICAST_IPV4_ADDR_COUNT=1 +CONFIG_NET_MAX_CONTEXTS=10 + +CONFIG_INIT_STACKS=y +CONFIG_PRINTK=y +CONFIG_NET_SHELL=y + +# Application configuration +CONFIG_NET_APP_SERVER=y +CONFIG_NET_APP_NEED_IPV6=y +CONFIG_NET_APP_NEED_IPV4=y +CONFIG_NET_APP_SETTINGS=y + +# First interface will use these settings +CONFIG_NET_APP_MY_IPV6_ADDR="2001:db8::1" + +# Logging +CONFIG_SYS_LOG_SHOW_COLOR=y +CONFIG_SYS_LOG_NET_LEVEL=4 + +# Debug options +CONFIG_NET_DEBUG_NET_PKT=y +CONFIG_NET_DEBUG_L2_ETHERNET=n +CONFIG_NET_DEBUG_ARP=n 
+CONFIG_NET_DEBUG_IF=n + +# Settings for native_posix ethernet driver (if compiled for that board) +CONFIG_ETH_NATIVE_POSIX=y diff --git a/samples/net/stats/sample.yaml b/samples/net/stats/sample.yaml new file mode 100644 index 0000000000000..aea8f62e7498e --- /dev/null +++ b/samples/net/stats/sample.yaml @@ -0,0 +1,8 @@ +common: + tags: net statistics +sample: + description: Test network statistics functionality + name: Network statistics sample app +tests: + test: + min_ram: 64 diff --git a/samples/net/stats/src/main.c b/samples/net/stats/src/main.c new file mode 100644 index 0000000000000..cec152bbece61 --- /dev/null +++ b/samples/net/stats/src/main.c @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2018 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if 1 +#define SYS_LOG_DOMAIN "net-stats" +#define NET_SYS_LOG_LEVEL SYS_LOG_LEVEL_DEBUG +#define NET_LOG_ENABLED 1 +#endif + +#include +#include + +#include +#include +#include + +static struct k_delayed_work stats_timer; + +#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE) +#define GET_STAT(iface, s) (iface ? 
iface->stats.s : data->s) +#else +#define GET_STAT(iface, s) data->s +#endif + +static void print_stats(struct net_if *iface, struct net_stats *data) +{ + if (iface) { + printk("Statistics for interface %p [%d]\n", iface, + net_if_get_by_iface(iface)); + } else { + printk("Global network statistics\n"); + } + +#if defined(CONFIG_NET_IPV6) + printk("IPv6 recv %d\tsent\t%d\tdrop\t%d\tforwarded\t%d\n", + GET_STAT(iface, ipv6.recv), + GET_STAT(iface, ipv6.sent), + GET_STAT(iface, ipv6.drop), + GET_STAT(iface, ipv6.forwarded)); +#if defined(CONFIG_NET_IPV6_ND) + printk("IPv6 ND recv %d\tsent\t%d\tdrop\t%d\n", + GET_STAT(iface, ipv6_nd.recv), + GET_STAT(iface, ipv6_nd.sent), + GET_STAT(iface, ipv6_nd.drop)); +#endif /* CONFIG_NET_IPV6_ND */ +#if defined(CONFIG_NET_STATISTICS_MLD) + printk("IPv6 MLD recv %d\tsent\t%d\tdrop\t%d\n", + GET_STAT(iface, ipv6_mld.recv), + GET_STAT(iface, ipv6_mld.sent), + GET_STAT(iface, ipv6_mld.drop)); +#endif /* CONFIG_NET_STATISTICS_MLD */ +#endif /* CONFIG_NET_IPV6 */ + +#if defined(CONFIG_NET_IPV4) + printk("IPv4 recv %d\tsent\t%d\tdrop\t%d\tforwarded\t%d\n", + GET_STAT(iface, ipv4.recv), + GET_STAT(iface, ipv4.sent), + GET_STAT(iface, ipv4.drop), + GET_STAT(iface, ipv4.forwarded)); +#endif /* CONFIG_NET_IPV4 */ + + printk("IP vhlerr %d\thblener\t%d\tlblener\t%d\n", + GET_STAT(iface, ip_errors.vhlerr), + GET_STAT(iface, ip_errors.hblenerr), + GET_STAT(iface, ip_errors.lblenerr)); + printk("IP fragerr %d\tchkerr\t%d\tprotoer\t%d\n", + GET_STAT(iface, ip_errors.fragerr), + GET_STAT(iface, ip_errors.chkerr), + GET_STAT(iface, ip_errors.protoerr)); + + printk("ICMP recv %d\tsent\t%d\tdrop\t%d\n", + GET_STAT(iface, icmp.recv), + GET_STAT(iface, icmp.sent), + GET_STAT(iface, icmp.drop)); + printk("ICMP typeer %d\tchkerr\t%d\n", + GET_STAT(iface, icmp.typeerr), + GET_STAT(iface, icmp.chkerr)); + +#if defined(CONFIG_NET_UDP) + printk("UDP recv %d\tsent\t%d\tdrop\t%d\n", + GET_STAT(iface, udp.recv), + GET_STAT(iface, udp.sent), + GET_STAT(iface, 
udp.drop)); + printk("UDP chkerr %d\n", + GET_STAT(iface, udp.chkerr)); +#endif + +#if defined(CONFIG_NET_STATISTICS_TCP) + printk("TCP bytes recv %u\tsent\t%d\n", + GET_STAT(iface, tcp.bytes.received), + GET_STAT(iface, tcp.bytes.sent)); + printk("TCP seg recv %d\tsent\t%d\tdrop\t%d\n", + GET_STAT(iface, tcp.recv), + GET_STAT(iface, tcp.sent), + GET_STAT(iface, tcp.drop)); + printk("TCP seg resent %d\tchkerr\t%d\tackerr\t%d\n", + GET_STAT(iface, tcp.resent), + GET_STAT(iface, tcp.chkerr), + GET_STAT(iface, tcp.ackerr)); + printk("TCP seg rsterr %d\trst\t%d\tre-xmit\t%d\n", + GET_STAT(iface, tcp.rsterr), + GET_STAT(iface, tcp.rst), + GET_STAT(iface, tcp.rexmit)); + printk("TCP conn drop %d\tconnrst\t%d\n", + GET_STAT(iface, tcp.conndrop), + GET_STAT(iface, tcp.connrst)); +#endif + +#if defined(CONFIG_NET_STATISTICS_RPL) + printk("RPL DIS recv %d\tsent\t%d\tdrop\t%d\n", + GET_STAT(iface, rpl.dis.recv), + GET_STAT(iface, rpl.dis.sent), + GET_STAT(iface, rpl.dis.drop)); + printk("RPL DIO recv %d\tsent\t%d\tdrop\t%d\n", + GET_STAT(iface, rpl.dio.recv), + GET_STAT(iface, rpl.dio.sent), + GET_STAT(iface, rpl.dio.drop)); + printk("RPL DAO recv %d\tsent\t%d\tdrop\t%d\tforwarded\t%d\n", + GET_STAT(iface, rpl.dao.recv), + GET_STAT(iface, rpl.dao.sent), + GET_STAT(iface, rpl.dao.drop), + GET_STAT(iface, rpl.dao.forwarded)); + printk("RPL DAOACK rcv %d\tsent\t%d\tdrop\t%d\n", + GET_STAT(iface, rpl.dao_ack.recv), + GET_STAT(iface, rpl.dao_ack.sent), + GET_STAT(iface, rpl.dao_ack.drop)); + printk("RPL overflows %d\tl-repairs\t%d\tg-repairs\t%d\n", + GET_STAT(iface, rpl.mem_overflows), + GET_STAT(iface, rpl.local_repairs), + GET_STAT(iface, rpl.global_repairs)); + printk("RPL malformed %d\tresets \t%d\tp-switch\t%d\n", + GET_STAT(iface, rpl.malformed_msgs), + GET_STAT(iface, rpl.resets), + GET_STAT(iface, rpl.parent_switch)); + printk("RPL f-errors %d\tl-errors\t%d\tl-warnings\t%d\n", + GET_STAT(iface, rpl.forward_errors), + GET_STAT(iface, rpl.loop_errors), + GET_STAT(iface, 
rpl.loop_warnings)); + printk("RPL r-repairs %d\n", + GET_STAT(iface, rpl.root_repairs)); +#endif + + printk("Bytes received %u\n", GET_STAT(iface, bytes.received)); + printk("Bytes sent %u\n", GET_STAT(iface, bytes.sent)); + printk("Processing err %d\n", GET_STAT(iface, processing_error)); +} + +#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE) +static void iface_cb(struct net_if *iface, void *user_data) +{ + struct net_stats *data = user_data; + + net_mgmt(NET_REQUEST_STATS_GET_ALL, iface, data, sizeof(*data)); + + print_stats(iface, data); +} +#endif + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) +static void print_eth_stats(struct net_if *iface, struct net_stats_eth *data) +{ + printk("Statistics for Ethernet interface %p [%d]\n", iface, + net_if_get_by_iface(iface)); + + printk("Bytes received : %u\n", data->bytes.received); + printk("Bytes sent : %u\n", data->bytes.sent); + printk("Packets received : %u\n", data->pkts.rx); + printk("Packets sent : %u\n", data->pkts.tx); + printk("Bcast received : %u\n", data->broadcast.rx); + printk("Bcast sent : %u\n", data->broadcast.tx); + printk("Mcast received : %u\n", data->multicast.rx); + printk("Mcast sent : %u\n", data->multicast.tx); +} + +static void eth_iface_cb(struct net_if *iface, void *user_data) +{ + struct net_stats_eth eth_data; + int ret; + + if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) { + return; + } + + ret = net_mgmt(NET_REQUEST_STATS_GET_ETHERNET, iface, ð_data, + sizeof(eth_data)); + if (ret < 0) { + return; + } + + print_eth_stats(iface, ð_data); +} +#endif + +static void stats(struct k_work *work) +{ + struct net_stats data; + + /* It is also possible to query some specific statistics by setting + * the first request parameter properly. See include/net/net_stats.h + * what requests are available. 
+ */ + net_mgmt(NET_REQUEST_STATS_GET_ALL, NULL, &data, sizeof(data)); + + print_stats(NULL, &data); + +#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE) + net_if_foreach(iface_cb, &data); +#endif + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + net_if_foreach(eth_iface_cb, &data); +#endif + + k_delayed_work_submit(&stats_timer, K_SECONDS(CONFIG_SAMPLE_PERIOD)); +} + +static void init_app(void) +{ + k_delayed_work_init(&stats_timer, stats); + k_delayed_work_submit(&stats_timer, K_SECONDS(CONFIG_SAMPLE_PERIOD)); +} + +void main(void) +{ + /* Register a timer that will collect statistics after every n seconds. + */ + init_app(); +} diff --git a/samples/net/vlan/CMakeLists.txt b/samples/net/vlan/CMakeLists.txt new file mode 100644 index 0000000000000..46ae3253a38cb --- /dev/null +++ b/samples/net/vlan/CMakeLists.txt @@ -0,0 +1,8 @@ +set(KCONFIG_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/Kconfig) + +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(NONE) + +target_sources(app PRIVATE src/main.c) + +include($ENV{ZEPHYR_BASE}/samples/net/common/common.cmake) diff --git a/samples/net/vlan/Kconfig b/samples/net/vlan/Kconfig new file mode 100644 index 0000000000000..1663ba57b3a7f --- /dev/null +++ b/samples/net/vlan/Kconfig @@ -0,0 +1,41 @@ +# Kconfig - Private config options for VLAN sample app + +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +mainmenu "Networking VLAN sample application" + +config ZEPHYR_BASE + string + option env="ZEPHYR_BASE" + +source "$ZEPHYR_BASE/Kconfig.zephyr" + +config SAMPLE_VLAN_TAG + int "Virtual lan tag used in VLAN sample app" + default 100 + range 0 4094 + depends on NET_VLAN + help + Set virtual lan tag (id) that is used in VLAN sample application. + +config SAMPLE_VLAN_TAG_2 + int "Second VLAN tag used in VLAN sample app" + default 200 + range 0 4094 + depends on NET_VLAN + help + Set virtual lan tag (id) that is used in VLAN sample application. 
+ +config SAMPLE_IPV6_ADDR_2 + string "My IPv6 address for second interface" + help + The value depends on your network setup. + +config SAMPLE_IPV4_ADDR_2 + string "My IPv4 address for second interface" + help + The value depends on your network setup. diff --git a/samples/net/vlan/README.rst b/samples/net/vlan/README.rst new file mode 100644 index 0000000000000..a1190a5b73fef --- /dev/null +++ b/samples/net/vlan/README.rst @@ -0,0 +1,60 @@ +.. _vlan-sample: + +Virtual LAN Sample Application +############################## + +Overview +******** + +The VLAN sample application for Zephyr will setup two virtual LAN networks. +The application sample enables net-shell and allows users to view VLAN settings. + +The source code for this sample application can be found at: +:file:`samples/net/vlan`. + +Requirements +************ + +- :ref:`networking_with_qemu` + +Building and Running +******************** + +A good way to run this VLAN application is with QEMU as described in +:ref:`networking_with_qemu`. +Currently only one VLAN network (tag) is supported when Zephyr is run inside +QEMU. If you're using a FRDM-K64F board, then multiple VLAN networks can be +configured. Note that VLAN is only supported for boards that have an ethernet +port or that support USB networking. + +Follow these steps to build the VLAN sample application: + +.. zephyr-app-commands:: + :zephyr-app: samples/net/vlan + :board: + :conf: prj.conf + :goals: build + :compact: + +The default configuration file prj.conf creates two virtual LAN networks +with these settings: + +- VLAN tag 100: IPv4 198.51.100.1 and IPv6 2001:db8:100::1 +- VLAN tag 200: IPv4 203.0.113.1 and IPv6 2001:db8:200::1 + +Setting up Linux Host +===================== + +The :file:`samples/net/vlan/vlan-setup-linux.sh` provides a script that can be +executed on the Linux host. It creates two VLANs on the Linux host and creates +routes to Zephyr. 
+
+If everything is configured correctly, you will be able to successfully execute
+the following commands on the Linux host.
+
+.. code-block:: console
+
+    ping -c 1 2001:db8:100::1
+    ping -c 1 198.51.100.1
+    ping -c 1 2001:db8:200::1
+    ping -c 1 203.0.113.1 diff --git a/samples/net/vlan/prj.conf b/samples/net/vlan/prj.conf new file mode 100644 index 0000000000000..2ee87ae69ee8a --- /dev/null +++ b/samples/net/vlan/prj.conf @@ -0,0 +1,63 @@ +CONFIG_NETWORKING=y
+CONFIG_NET_LOG=y
+CONFIG_NET_IPV6=y
+CONFIG_NET_IPV4=y
+CONFIG_NET_DHCPV4=n
+CONFIG_NET_UDP=y
+CONFIG_NET_TCP=y
+CONFIG_NET_STATISTICS=y
+
+CONFIG_TEST_RANDOM_GENERATOR=y
+
+CONFIG_NET_PKT_RX_COUNT=32
+CONFIG_NET_PKT_TX_COUNT=32
+CONFIG_NET_BUF_RX_COUNT=32
+CONFIG_NET_BUF_TX_COUNT=32
+CONFIG_NET_IF_UNICAST_IPV6_ADDR_COUNT=5
+CONFIG_NET_IF_MCAST_IPV6_ADDR_COUNT=5
+CONFIG_NET_IF_UNICAST_IPV4_ADDR_COUNT=1
+CONFIG_NET_MAX_CONTEXTS=10
+
+CONFIG_INIT_STACKS=y
+CONFIG_PRINTK=y
+CONFIG_NET_SHELL=y
+
+# Ethernet is needed for VLAN
+CONFIG_NET_L2_ETHERNET=y
+
+CONFIG_NET_APP_SERVER=y
+CONFIG_NET_APP_NEED_IPV6=y
+CONFIG_NET_APP_NEED_IPV4=y
+CONFIG_NET_APP_SETTINGS=y
+
+# First ethernet interface will use these settings
+CONFIG_NET_APP_MY_IPV6_ADDR="2001:db8:100::1"
+CONFIG_NET_APP_PEER_IPV6_ADDR="2001:db8:100::2"
+# TEST-NET-2 from RFC 5737
+CONFIG_NET_APP_MY_IPV4_ADDR="198.51.100.1"
+CONFIG_NET_APP_PEER_IPV4_ADDR="198.51.100.2"
+# VLAN tag for the first interface
+CONFIG_SAMPLE_VLAN_TAG=100
+
+# Settings for the second network interface
+CONFIG_SAMPLE_IPV6_ADDR_2="2001:db8:200::1"
+CONFIG_SAMPLE_VLAN_TAG_2=200
+# TEST-NET-3 from RFC 5737
+CONFIG_SAMPLE_IPV4_ADDR_2="203.0.113.1"
+
+# Logging
+CONFIG_SYS_LOG_SHOW_COLOR=y
+CONFIG_SYS_LOG_NET_LEVEL=4
+
+CONFIG_NET_DEBUG_NET_PKT=y
+CONFIG_NET_DEBUG_L2_ETHERNET=n
+CONFIG_NET_DEBUG_ARP=n
+CONFIG_NET_DEBUG_IF=n
+
+# VLAN settings. Note that currently SLIP only supports one VLAN tag,
+# and that is enforced by Kconfig file. 
+CONFIG_NET_VLAN=y +CONFIG_NET_VLAN_COUNT=2 + +# Settings for native_posix ethernet driver (if compiled for that board) +CONFIG_ETH_NATIVE_POSIX=y diff --git a/samples/net/vlan/sample.yaml b/samples/net/vlan/sample.yaml new file mode 100644 index 0000000000000..cc6c57379774f --- /dev/null +++ b/samples/net/vlan/sample.yaml @@ -0,0 +1,9 @@ +common: + harness: net + tags: net vlan +sample: + description: Test VLAN functionality + name: VLAN sample app +tests: + test: + depends_on: netif diff --git a/samples/net/vlan/src/main.c b/samples/net/vlan/src/main.c new file mode 100644 index 0000000000000..929e9d5a44956 --- /dev/null +++ b/samples/net/vlan/src/main.c @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2018 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if 1 +#define SYS_LOG_DOMAIN "vlan-app" +#define NET_SYS_LOG_LEVEL SYS_LOG_LEVEL_DEBUG +#define NET_LOG_ENABLED 1 +#endif + +#include +#include + +#include +#include +#include +#include + +#if CONFIG_NET_VLAN_COUNT > 1 +#define CREATE_MULTIPLE_TAGS +#endif + +struct ud { + struct net_if *first; + struct net_if *second; +}; + +#if defined(CREATE_MULTIPLE_TAGS) +static void iface_cb(struct net_if *iface, void *user_data) +{ + struct ud *ud = user_data; + + if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) { + return; + } + + if (iface == ud->first) { + return; + } + + ud->second = iface; +} +#endif + +static int init_app(void) +{ + struct net_if *iface; + int ret; + +#if defined(CREATE_MULTIPLE_TAGS) + struct net_if_addr *ifaddr; + struct in_addr addr4; + struct in6_addr addr6; + struct ud ud; +#endif + + iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET)); + if (!iface) { + NET_ERR("No ethernet interfaces found."); + return -ENOENT; + } + + ret = net_eth_vlan_enable(iface, CONFIG_SAMPLE_VLAN_TAG); + if (ret < 0) { + NET_ERR("Cannot enable VLAN for tag %d (%d)", + CONFIG_SAMPLE_VLAN_TAG, ret); + } + +#if defined(CREATE_MULTIPLE_TAGS) + ud.first = iface; + ud.second = NULL; + + 
net_if_foreach(iface_cb, &ud); + + /* This sample has two VLANs. For the second one we need to manually + * create IP address for this test. But first the VLAN needs to be + * added to the interface so that IPv6 DAD can work properly. + */ + ret = net_eth_vlan_enable(ud.second, CONFIG_SAMPLE_VLAN_TAG_2); + if (ret < 0) { + NET_ERR("Cannot enable VLAN for tag %d (%d)", + CONFIG_SAMPLE_VLAN_TAG_2, ret); + } + + if (net_addr_pton(AF_INET6, CONFIG_SAMPLE_IPV6_ADDR_2, &addr6)) { + NET_ERR("Invalid address: %s", CONFIG_SAMPLE_IPV6_ADDR_2); + return -EINVAL; + } + + ifaddr = net_if_ipv6_addr_add(ud.second, &addr6, NET_ADDR_MANUAL, 0); + if (!ifaddr) { + NET_ERR("Cannot add %s to interface %p", + CONFIG_SAMPLE_IPV6_ADDR_2, ud.second); + return -EINVAL; + } + + if (net_addr_pton(AF_INET, CONFIG_SAMPLE_IPV4_ADDR_2, &addr4)) { + NET_ERR("Invalid address: %s", CONFIG_SAMPLE_IPV4_ADDR_2); + return -EINVAL; + } + + ifaddr = net_if_ipv4_addr_add(ud.second, &addr4, NET_ADDR_MANUAL, 0); + if (!ifaddr) { + NET_ERR("Cannot add %s to interface %p", + CONFIG_SAMPLE_IPV4_ADDR_2, ud.second); + return -EINVAL; + } +#endif + + return ret; +} + +void main(void) +{ + init_app(); +} diff --git a/samples/net/vlan/vlan-setup-linux.sh b/samples/net/vlan/vlan-setup-linux.sh new file mode 100755 index 0000000000000..28892a7cd4442 --- /dev/null +++ b/samples/net/vlan/vlan-setup-linux.sh @@ -0,0 +1,74 @@ +#!/bin/sh +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Setup virtual lan (VLAN) in Linux side that would work with this sample. + +if [ -z "$1" ]; then + echo "Network interface is missing." + echo "This is the network interface where the VLAN will be running." + echo "Example: eth0, tap0 etc." + exit 1 +fi + +if [ `id -u` != 0 ]; then + echo "Run this script as a root user!" + exit 2 +fi + +IFACE="$1" + +VLAN_NAME_PREFIX="vlan" + +PREFIX_1_IPV6="2001:db8:100" +PREFIXLEN_1_IPV6="64" +PREFIX_2_IPV6="2001:db8:200" +PREFIXLEN_2_IPV6="64" + +# From RFC 5737 +PREFIX_1_IPV4="198.51.100" +PREFIXLEN_1_IPV4="24" +PREFIX_2_IPV4="203.0.113" +PREFIXLEN_2_IPV4="24" + +ip link add link ${IFACE} name ${VLAN_NAME_PREFIX}.100 type vlan id 100 +ip link add link ${IFACE} name ${VLAN_NAME_PREFIX}.200 type vlan id 200 + +ip link set ${VLAN_NAME_PREFIX}.100 up +ip link set ${VLAN_NAME_PREFIX}.200 up + +ip -6 addr add ${PREFIX_1_IPV6}::2 dev ${VLAN_NAME_PREFIX}.100 +ip -6 route add ${PREFIX_1_IPV6}::/${PREFIXLEN_1_IPV6} \ + dev ${VLAN_NAME_PREFIX}.100 + +ip -6 addr add ${PREFIX_2_IPV6}::2 dev ${VLAN_NAME_PREFIX}.200 +ip -6 route add ${PREFIX_2_IPV6}::/${PREFIXLEN_2_IPV6} \ + dev ${VLAN_NAME_PREFIX}.200 + +ip addr add ${PREFIX_1_IPV4}.2 dev ${VLAN_NAME_PREFIX}.100 +ip route add ${PREFIX_1_IPV4}/${PREFIXLEN_1_IPV4} dev ${VLAN_NAME_PREFIX}.100 + +ip addr add ${PREFIX_2_IPV4}.2 dev ${VLAN_NAME_PREFIX}.200 +ip route add ${PREFIX_2_IPV4}/${PREFIXLEN_2_IPV4} dev ${VLAN_NAME_PREFIX}.200 + + +# You can remove the virtual interface like this +# ip link del link eth0 dev vlan.100 +# ip link del link eth0 dev vlan.200 + +# If your devices HW MAC address changes when rebooting or flashing, +# then you can flush the neighbor cache in Linux like this: +# ip neigh flush dev vlan.100 +# ip neigh flush dev vlan.200 diff --git a/scripts/sanitycheck b/scripts/sanitycheck index 058d5af739946..06e794c63c9ab 100755 --- a/scripts/sanitycheck +++ 
b/scripts/sanitycheck @@ -586,7 +586,7 @@ class SizeCalculator: "ccm_data"] # These get copied into RAM only on non-XIP ro_sections = ["text", "ctors", "init_array", "reset", "object_access", - "rodata", "devconfig", "net_l2", "vector"] + "rodata", "devconfig", "net_l2", "vector", "ptp_clock"] def __init__(self, filename, extra_sections): """Constructor diff --git a/subsys/logging/sys_log_net.c b/subsys/logging/sys_log_net.c index adc54724801d4..273f97cec0be6 100644 --- a/subsys/logging/sys_log_net.c +++ b/subsys/logging/sys_log_net.c @@ -190,11 +190,12 @@ void syslog_net_hook_install(void) #endif } else if (server_addr.sa_family == AF_INET) { #if defined(CONFIG_NET_IPV4) - /* FIXME: instead of taking the first IPv4 address of an - * interface, take the proper one according to routing - */ - struct net_if_ipv4 *ipv4 = - net_if_get_default()->config.ip.ipv4; + struct net_if_ipv4 *ipv4; + struct net_if *iface; + + iface = net_if_ipv4_select_src_iface( + &net_sin(&server_addr)->sin_addr); + ipv4 = iface->config.ip.ipv4; net_ipaddr_copy(&local_addr4.sin_addr, &ipv4->unicast[0].address.in_addr); diff --git a/subsys/net/ip/Kconfig b/subsys/net/ip/Kconfig index 2156e54814b2e..e2c7d96f748ef 100644 --- a/subsys/net/ip/Kconfig +++ b/subsys/net/ip/Kconfig @@ -417,6 +417,23 @@ config NET_DEFAULT_IF_DUMMY endchoice +config NET_PKT_TIMESTAMP + bool "Enable network packet timestamp support" + default n + help + Enable network packet timestamp support. This is needed for + example in gPTP which needs to know how long it takes to send + a network packet. + +config NET_PKT_TIMESTAMP_STACK_SIZE + int "Timestamp thread stack size" + default 1024 + depends on NET_PKT_TIMESTAMP + help + Set the timestamp thread stack size in bytes. The timestamp + thread waits for timestamped TX frames and calls registered + callbacks. 
+ source "subsys/net/ip/Kconfig.stack" source "subsys/net/ip/l2/Kconfig" diff --git a/subsys/net/ip/Kconfig.ipv4 b/subsys/net/ip/Kconfig.ipv4 index 57cb83e9a4c0d..ee1c9ada5638b 100644 --- a/subsys/net/ip/Kconfig.ipv4 +++ b/subsys/net/ip/Kconfig.ipv4 @@ -24,6 +24,7 @@ config NET_INITIAL_TTL config NET_IF_MAX_IPV4_COUNT int "Max number of IPv4 network interfaces in the system" default 1 + default NET_VLAN_COUNT if NET_VLAN help This tells how many network interfaces there will be in the system that will have IPv4 enabled. diff --git a/subsys/net/ip/Kconfig.ipv6 b/subsys/net/ip/Kconfig.ipv6 index 8d61bc8446031..c6f9430c0b7a2 100644 --- a/subsys/net/ip/Kconfig.ipv6 +++ b/subsys/net/ip/Kconfig.ipv6 @@ -18,6 +18,7 @@ if NET_IPV6 config NET_IF_MAX_IPV6_COUNT int "Max number of IPv6 network interfaces in the system" default 1 + default NET_VLAN_COUNT if NET_VLAN help This tells how many network interfaces there will be in the system that will have IPv6 enabled. diff --git a/subsys/net/ip/Kconfig.stats b/subsys/net/ip/Kconfig.stats index 27d8721c6a9e3..4035107a85eda 100644 --- a/subsys/net/ip/Kconfig.stats +++ b/subsys/net/ip/Kconfig.stats @@ -14,6 +14,12 @@ menuconfig NET_STATISTICS if NET_STATISTICS +config NET_STATISTICS_PER_INTERFACE + bool "Collect statistics per network interface" + default y + help + Collect statistics also for each network interface. + config NET_STATISTICS_USER_API bool "Expose statistics through NET MGMT API" select NET_MGMT @@ -87,4 +93,13 @@ config NET_STATISTICS_MLD help Keep track of MLD related statistics +config NET_STATISTICS_ETHERNET + bool "Ethernet statistics" + depends on NET_L2_ETHERNET + default y + help + Keep track of Ethernet related statistics. Note that this + requires support from the ethernet driver. The driver needs + to collect the statistics. 
+ endif # NET_STATISTICS diff --git a/subsys/net/ip/connection.c b/subsys/net/ip/connection.c index 3c590cc64ecd2..1e9c6a2d7a7d0 100644 --- a/subsys/net/ip/connection.c +++ b/subsys/net/ip/connection.c @@ -915,7 +915,7 @@ enum net_verdict net_conn_input(enum net_ip_protocol proto, struct net_pkt *pkt) chksum_calc = net_udp_get_chksum(pkt, pkt->frags); if (chksum != chksum_calc) { - net_stats_update_udp_chkerr(); + net_stats_update_udp_chkerr(net_pkt_iface(pkt)); NET_DBG("UDP checksum mismatch " "expected 0x%04x got 0x%04x, dropping packet.", ntohs(chksum_calc), ntohs(chksum)); @@ -931,7 +931,8 @@ enum net_verdict net_conn_input(enum net_ip_protocol proto, struct net_pkt *pkt) chksum_calc = net_tcp_get_chksum(pkt, pkt->frags); if (chksum != chksum_calc) { - net_stats_update_tcp_seg_chkerr(); + net_stats_update_tcp_seg_chkerr( + net_pkt_iface(pkt)); NET_DBG("TCP checksum mismatch " "expected 0x%04x got 0x%04x, dropping packet.", ntohs(chksum_calc), ntohs(chksum)); @@ -963,7 +964,7 @@ enum net_verdict net_conn_input(enum net_ip_protocol proto, struct net_pkt *pkt) goto drop; } - net_stats_update_per_proto_recv(proto); + net_stats_update_per_proto_recv(net_pkt_iface(pkt), proto); return NET_OK; } @@ -991,12 +992,12 @@ enum net_verdict net_conn_input(enum net_ip_protocol proto, struct net_pkt *pkt) send_icmp_error(pkt); if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) { - net_stats_update_tcp_seg_connrst(); + net_stats_update_tcp_seg_connrst(net_pkt_iface(pkt)); } } drop: - net_stats_update_per_proto_drop(proto); + net_stats_update_per_proto_drop(net_pkt_iface(pkt), proto); return NET_DROP; } diff --git a/subsys/net/ip/icmpv4.c b/subsys/net/ip/icmpv4.c index 02c2a0935da41..52e6e49ada8ef 100644 --- a/subsys/net/ip/icmpv4.c +++ b/subsys/net/ip/icmpv4.c @@ -144,11 +144,11 @@ static inline enum net_verdict handle_echo_request(struct net_pkt *pkt) #endif /* CONFIG_NET_DEBUG_ICMPV4 */ if (net_send_data(pkt) < 0) { - net_stats_update_icmp_drop(); + 
net_stats_update_icmp_drop(net_pkt_iface(pkt)); return NET_DROP; } - net_stats_update_icmp_sent(); + net_stats_update_icmp_sent(net_pkt_iface(pkt)); return NET_OK; } @@ -244,12 +244,13 @@ int net_icmpv4_send_echo_request(struct net_if *iface, net_icmpv4_set_chksum(pkt, pkt->frags); if (net_send_data(pkt) >= 0) { - net_stats_update_icmp_sent(); + net_stats_update_icmp_sent(iface); return 0; } + net_stats_update_icmp_drop(iface); + net_pkt_unref(pkt); - net_stats_update_icmp_drop(); return -EIO; } @@ -349,7 +350,7 @@ int net_icmpv4_send_error(struct net_pkt *orig, u8_t type, u8_t code) #endif /* CONFIG_NET_DEBUG_ICMPV4 */ if (net_send_data(pkt) >= 0) { - net_stats_update_icmp_sent(); + net_stats_update_icmp_sent(iface); return 0; } @@ -357,7 +358,7 @@ int net_icmpv4_send_error(struct net_pkt *orig, u8_t type, u8_t code) net_pkt_unref(pkt); drop_no_pkt: - net_stats_update_icmp_drop(); + net_stats_update_icmp_drop(iface); return err; } @@ -377,7 +378,7 @@ enum net_verdict net_icmpv4_input(struct net_pkt *pkt, { struct net_icmpv4_handler *cb; - net_stats_update_icmp_recv(); + net_stats_update_icmp_recv(net_pkt_iface(pkt)); SYS_SLIST_FOR_EACH_CONTAINER(&handlers, cb, node) { if (cb->type == type && (cb->code == code || cb->code == 0)) { @@ -385,7 +386,7 @@ enum net_verdict net_icmpv4_input(struct net_pkt *pkt, } } - net_stats_update_icmp_drop(); + net_stats_update_icmp_drop(net_pkt_iface(pkt)); return NET_DROP; } diff --git a/subsys/net/ip/icmpv6.c b/subsys/net/ip/icmpv6.c index 8ab1201b53292..22a81076cb8c1 100644 --- a/subsys/net/ip/icmpv6.c +++ b/subsys/net/ip/icmpv6.c @@ -486,7 +486,7 @@ static enum net_verdict handle_echo_request(struct net_pkt *orig) } net_pkt_unref(orig); - net_stats_update_icmp_sent(); + net_stats_update_icmp_sent(iface); return NET_OK; @@ -494,7 +494,7 @@ static enum net_verdict handle_echo_request(struct net_pkt *orig) net_pkt_unref(pkt); drop_no_pkt: - net_stats_update_icmp_drop(); + net_stats_update_icmp_drop(iface); return NET_DROP; } @@ 
-504,7 +504,7 @@ int net_icmpv6_send_error(struct net_pkt *orig, u8_t type, u8_t code, { struct net_pkt *pkt; struct net_buf *frag; - struct net_if *iface; + struct net_if *iface = net_pkt_iface(orig); size_t extra_len, reserve; int err = -EIO; @@ -519,8 +519,6 @@ int net_icmpv6_send_error(struct net_pkt *orig, u8_t type, u8_t code, } } - iface = net_pkt_iface(orig); - pkt = net_pkt_get_reserve_tx(0, PKT_WAIT_TIME); if (!pkt) { err = -ENOMEM; @@ -613,7 +611,7 @@ int net_icmpv6_send_error(struct net_pkt *orig, u8_t type, u8_t code, #endif /* CONFIG_NET_DEBUG_ICMPV6 */ if (net_send_data(pkt) >= 0) { - net_stats_update_icmp_sent(); + net_stats_update_icmp_sent(iface); return 0; } @@ -621,7 +619,7 @@ int net_icmpv6_send_error(struct net_pkt *orig, u8_t type, u8_t code, net_pkt_unref(pkt); drop_no_pkt: - net_stats_update_icmp_drop(); + net_stats_update_icmp_drop(iface); return err; } @@ -673,13 +671,13 @@ int net_icmpv6_send_echo_request(struct net_if *iface, #endif /* CONFIG_NET_DEBUG_ICMPV6 */ if (net_send_data(pkt) >= 0) { - net_stats_update_icmp_sent(); + net_stats_update_icmp_sent(iface); return 0; } drop: net_pkt_unref(pkt); - net_stats_update_icmp_drop(); + net_stats_update_icmp_drop(iface); return -EIO; } @@ -689,7 +687,7 @@ enum net_verdict net_icmpv6_input(struct net_pkt *pkt, { struct net_icmpv6_handler *cb; - net_stats_update_icmp_recv(); + net_stats_update_icmp_recv(net_pkt_iface(pkt)); SYS_SLIST_FOR_EACH_CONTAINER(&handlers, cb, node) { if (cb->type == type && (cb->code == code || cb->code == 0)) { @@ -697,7 +695,7 @@ enum net_verdict net_icmpv6_input(struct net_pkt *pkt, } } - net_stats_update_icmp_drop(); + net_stats_update_icmp_drop(net_pkt_iface(pkt)); return NET_DROP; } diff --git a/subsys/net/ip/ipv4.c b/subsys/net/ip/ipv4.c index ea262ab229e8a..0cca00ab577d1 100644 --- a/subsys/net/ip/ipv4.c +++ b/subsys/net/ip/ipv4.c @@ -218,6 +218,6 @@ enum net_verdict net_ipv4_process_pkt(struct net_pkt *pkt) } drop: - net_stats_update_ipv4_drop(); + 
net_stats_update_ipv4_drop(net_pkt_iface(pkt)); return NET_DROP; } diff --git a/subsys/net/ip/ipv6.c b/subsys/net/ip/ipv6.c index f0a74605d932e..9f0ed1e492000 100644 --- a/subsys/net/ip/ipv6.c +++ b/subsys/net/ip/ipv6.c @@ -1359,13 +1359,13 @@ int net_ipv6_send_na(struct net_if *iface, const struct in6_addr *src, goto drop; } - net_stats_update_ipv6_nd_sent(); + net_stats_update_ipv6_nd_sent(net_pkt_iface(pkt)); return 0; drop: + net_stats_update_ipv6_nd_drop(net_pkt_iface(pkt)); net_pkt_unref(pkt); - net_stats_update_ipv6_nd_drop(); return -EINVAL; } @@ -1412,7 +1412,7 @@ static enum net_verdict handle_ns_input(struct net_pkt *pkt) &NET_IPV6_HDR(pkt)->dst, &ns_hdr->tgt); - net_stats_update_ipv6_nd_recv(); + net_stats_update_ipv6_nd_recv(net_pkt_iface(pkt)); if ((total_len < (sizeof(struct net_ipv6_hdr) + sizeof(struct net_icmp_hdr) + @@ -1641,7 +1641,7 @@ static enum net_verdict handle_ns_input(struct net_pkt *pkt) return NET_DROP; drop: - net_stats_update_ipv6_nd_drop(); + net_stats_update_ipv6_nd_drop(net_pkt_iface(pkt)); return NET_DROP; } @@ -1963,7 +1963,7 @@ static enum net_verdict handle_na_input(struct net_pkt *pkt) &NET_IPV6_HDR(pkt)->dst, &na_hdr->tgt); - net_stats_update_ipv6_nd_recv(); + net_stats_update_ipv6_nd_recv(net_pkt_iface(pkt)); if ((total_len < (sizeof(struct net_ipv6_hdr) + sizeof(struct net_icmp_hdr) + @@ -2042,12 +2042,12 @@ static enum net_verdict handle_na_input(struct net_pkt *pkt) net_pkt_unref(pkt); - net_stats_update_ipv6_nd_sent(); + net_stats_update_ipv6_nd_sent(net_pkt_iface(pkt)); return NET_OK; drop: - net_stats_update_ipv6_nd_drop(); + net_stats_update_ipv6_nd_drop(net_pkt_iface(pkt)); return NET_DROP; } @@ -2193,13 +2193,13 @@ int net_ipv6_send_ns(struct net_if *iface, goto drop; } - net_stats_update_ipv6_nd_sent(); + net_stats_update_ipv6_nd_sent(net_pkt_iface(pkt)); return 0; drop: + net_stats_update_ipv6_nd_drop(net_pkt_iface(pkt)); net_pkt_unref(pkt); - net_stats_update_ipv6_nd_drop(); return -EINVAL; } @@ -2269,13 
+2269,13 @@ int net_ipv6_send_rs(struct net_if *iface) goto drop; } - net_stats_update_ipv6_nd_sent(); + net_stats_update_ipv6_nd_sent(net_pkt_iface(pkt)); return 0; drop: + net_stats_update_ipv6_nd_drop(net_pkt_iface(pkt)); net_pkt_unref(pkt); - net_stats_update_ipv6_nd_drop(); return -EINVAL; } @@ -2581,7 +2581,7 @@ static enum net_verdict handle_ra_input(struct net_pkt *pkt) &NET_IPV6_HDR(pkt)->src, &NET_IPV6_HDR(pkt)->dst); - net_stats_update_ipv6_nd_recv(); + net_stats_update_ipv6_nd_recv(net_pkt_iface(pkt)); if ((total_len < (sizeof(struct net_ipv6_hdr) + sizeof(struct net_icmp_hdr) + @@ -2769,7 +2769,7 @@ static enum net_verdict handle_ra_input(struct net_pkt *pkt) return NET_OK; drop: - net_stats_update_ipv6_nd_drop(); + net_stats_update_ipv6_nd_drop(net_pkt_iface(pkt)); return NET_DROP; } @@ -2864,15 +2864,16 @@ static int send_mldv2_raw(struct net_if *iface, struct net_buf *frags) goto drop; } - net_stats_update_icmp_sent(); - net_stats_update_ipv6_mld_sent(); + net_stats_update_icmp_sent(net_pkt_iface(pkt)); + net_stats_update_ipv6_mld_sent(net_pkt_iface(pkt)); return 0; drop: + net_stats_update_icmp_drop(net_pkt_iface(pkt)); + net_stats_update_ipv6_mld_drop(net_pkt_iface(pkt)); + net_pkt_unref(pkt); - net_stats_update_icmp_drop(); - net_stats_update_ipv6_mld_drop(); return ret; } @@ -2999,7 +3000,7 @@ static enum net_verdict handle_mld_query(struct net_pkt *pkt) &NET_IPV6_HDR(pkt)->src, &NET_IPV6_HDR(pkt)->dst); - net_stats_update_ipv6_mld_recv(); + net_stats_update_ipv6_mld_recv(net_pkt_iface(pkt)); /* offset tells now where the ICMPv6 header is starting */ frag = net_frag_get_pos(pkt, @@ -3044,7 +3045,7 @@ static enum net_verdict handle_mld_query(struct net_pkt *pkt) send_mld_report(net_pkt_iface(pkt)); drop: - net_stats_update_ipv6_mld_drop(); + net_stats_update_ipv6_mld_drop(net_pkt_iface(pkt)); return NET_DROP; } @@ -4158,7 +4159,7 @@ enum net_verdict net_ipv6_process_pkt(struct net_pkt *pkt) if (real_len != pkt_len) { NET_DBG("IPv6 packet size %d 
pkt len %d", pkt_len, real_len); - net_stats_update_ipv6_drop(); + net_stats_update_ipv6_drop(net_pkt_iface(pkt)); goto drop; } @@ -4175,7 +4176,7 @@ enum net_verdict net_ipv6_process_pkt(struct net_pkt *pkt) if (net_is_ipv6_addr_mcast(&hdr->src)) { NET_DBG("Dropping src multicast packet"); - net_stats_update_ipv6_drop(); + net_stats_update_ipv6_drop(net_pkt_iface(pkt)); goto drop; } @@ -4194,7 +4195,7 @@ enum net_verdict net_ipv6_process_pkt(struct net_pkt *pkt) NET_DBG("IPv6 packet in pkt %p not for me", pkt); #endif /* CONFIG_NET_ROUTE */ - net_stats_update_ipv6_drop(); + net_stats_update_ipv6_drop(net_pkt_iface(pkt)); goto drop; } @@ -4209,7 +4210,7 @@ enum net_verdict net_ipv6_process_pkt(struct net_pkt *pkt) &hdr->dst)) { no_route_info(pkt, &hdr->src, &hdr->dst); - net_stats_update_ipv6_drop(); + net_stats_update_ipv6_drop(net_pkt_iface(pkt)); goto drop; } @@ -4349,7 +4350,7 @@ enum net_verdict net_ipv6_process_pkt(struct net_pkt *pkt) offset - 1); NET_DBG("Unknown next header type"); - net_stats_update_ip_errors_protoerr(); + net_stats_update_ip_errors_protoerr(net_pkt_iface(pkt)); return NET_DROP; } diff --git a/subsys/net/ip/l2/CMakeLists.txt b/subsys/net/ip/l2/CMakeLists.txt index abc7b3b912db1..5338c32433961 100644 --- a/subsys/net/ip/l2/CMakeLists.txt +++ b/subsys/net/ip/l2/CMakeLists.txt @@ -25,3 +25,7 @@ endif() if(CONFIG_NET_L2_OPENTHREAD) add_subdirectory(openthread) endif() + +if(CONFIG_NET_GPTP) + add_subdirectory(gptp) +endif() diff --git a/subsys/net/ip/l2/Kconfig b/subsys/net/ip/l2/Kconfig index 551c0ceda5dd0..b946b72a860b9 100644 --- a/subsys/net/ip/l2/Kconfig +++ b/subsys/net/ip/l2/Kconfig @@ -22,6 +22,21 @@ config NET_L2_ETHERNET If NET_SLIP_TAP is selected, NET_L2_ETHERNET will enable to fully simulate Ethernet through SLIP. +config NET_VLAN + bool "Enable virtual lan support" + default n + depends on NET_L2_ETHERNET + help + Enables virtual lan (VLAN) support for Ethernet. 
+ +config NET_VLAN_COUNT + int "Max VLAN tags supported in the system" + default 1 + range 1 255 + depends on NET_VLAN + help + How many VLAN tags can be configured. + config NET_DEBUG_L2_ETHERNET bool "Debug Ethernet L2 layer" default n @@ -109,6 +124,8 @@ config NET_L2_BT_SHELL source "subsys/net/ip/l2/ieee802154/Kconfig" +source "subsys/net/ip/l2/gptp/Kconfig" + config NET_ARP bool "Enable ARP" default y diff --git a/subsys/net/ip/l2/arp.c b/subsys/net/ip/l2/arp.c index 97009ff274ab7..ea4f2cb003949 100644 --- a/subsys/net/ip/l2/arp.c +++ b/subsys/net/ip/l2/arp.c @@ -101,14 +101,24 @@ static inline struct net_pkt *prepare_arp(struct net_if *iface, struct arp_entry *entry, struct net_pkt *pending) { +#if defined(CONFIG_NET_VLAN) + u16_t vlan_tag = net_eth_get_vlan_tag(iface); +#endif + struct ethernet_context *ctx = net_if_l2_data(iface); + int eth_hdr_len = sizeof(struct net_eth_hdr); struct net_pkt *pkt; struct net_buf *frag; struct net_arp_hdr *hdr; struct net_eth_hdr *eth; struct in_addr *my_addr; - pkt = net_pkt_get_reserve_tx(sizeof(struct net_eth_hdr), - NET_BUF_TIMEOUT); +#if defined(CONFIG_NET_VLAN) + if (ctx->vlan_enabled && vlan_tag != NET_VLAN_TAG_UNSPEC) { + eth_hdr_len = sizeof(struct net_eth_vlan_hdr); + } +#endif + + pkt = net_pkt_get_reserve_tx(eth_hdr_len, NET_BUF_TIMEOUT); if (!pkt) { return NULL; } @@ -124,7 +134,13 @@ static inline struct net_pkt *prepare_arp(struct net_if *iface, net_pkt_set_family(pkt, AF_INET); hdr = NET_ARP_HDR(pkt); - eth = NET_ETH_HDR(pkt); + +#if defined(CONFIG_NET_VLAN) + net_pkt_set_vlan_tag(pkt, vlan_tag); +#endif + + eth = net_eth_fill_header(ctx, pkt, frag, htons(NET_ETH_PTYPE_ARP), + NULL, NULL); /* If entry is not set, then we are just about to send * an ARP request using the data in pending net_pkt. 
@@ -146,7 +162,6 @@ static inline struct net_pkt *prepare_arp(struct net_if *iface, sizeof(struct net_eth_addr)); } - eth->type = htons(NET_ETH_PTYPE_ARP); memset(ð->dst.addr, 0xff, sizeof(struct net_eth_addr)); hdr->hwtype = htons(NET_ARP_HTYPE_ETH); @@ -181,8 +196,9 @@ static inline struct net_pkt *prepare_arp(struct net_if *iface, struct net_pkt *net_arp_prepare(struct net_pkt *pkt) { - struct net_buf *frag; + struct ethernet_context *ctx = net_if_l2_data(net_pkt_iface(pkt)); struct arp_entry *entry, *free_entry = NULL, *non_pending = NULL; + struct net_buf *frag; struct net_linkaddr *ll; struct net_eth_hdr *hdr; struct in_addr *addr; @@ -191,7 +207,11 @@ struct net_pkt *net_arp_prepare(struct net_pkt *pkt) return NULL; } - if (net_pkt_ll_reserve(pkt) != sizeof(struct net_eth_hdr)) { + if (net_pkt_ll_reserve(pkt) != sizeof(struct net_eth_hdr) +#if defined(CONFIG_NET_VLAN) + && net_pkt_ll_reserve(pkt) != sizeof(struct net_eth_vlan_hdr) +#endif + ) { /* Add the ethernet header if it is missing. 
*/ struct net_buf *header; @@ -200,24 +220,9 @@ struct net_pkt *net_arp_prepare(struct net_pkt *pkt) return NULL; } - net_pkt_set_ll_reserve(pkt, sizeof(struct net_eth_hdr)); - - hdr = (struct net_eth_hdr *)(header->data - - net_pkt_ll_reserve(pkt)); - - hdr->type = htons(NET_ETH_PTYPE_IP); - - ll = net_pkt_ll_dst(pkt); - if (ll->addr) { - memcpy(&hdr->dst.addr, ll->addr, - sizeof(struct net_eth_addr)); - } - - ll = net_pkt_ll_src(pkt); - if (ll->addr) { - memcpy(&hdr->src.addr, ll->addr, - sizeof(struct net_eth_addr)); - } + net_eth_fill_header(ctx, pkt, header, htons(NET_ETH_PTYPE_IP), + net_pkt_ll_src(pkt)->addr, + net_pkt_ll_dst(pkt)->addr); net_pkt_frag_insert(pkt, header); @@ -293,14 +298,9 @@ struct net_pkt *net_arp_prepare(struct net_pkt *pkt) continue; } - hdr = (struct net_eth_hdr *)(frag->data - - net_pkt_ll_reserve(pkt)); - hdr->type = htons(NET_ETH_PTYPE_IP); - - memcpy(&hdr->src.addr, ll->addr, - sizeof(struct net_eth_addr)); - memcpy(&hdr->dst.addr, &entry->eth.addr, - sizeof(struct net_eth_addr)); + hdr = net_eth_fill_header(ctx, pkt, frag, + htons(NET_ETH_PTYPE_IP), + ll->addr, entry->eth.addr); frag = frag->frags; } @@ -368,13 +368,21 @@ static inline void arp_update(struct net_if *iface, static inline struct net_pkt *prepare_arp_reply(struct net_if *iface, struct net_pkt *req) { + struct ethernet_context *ctx = net_if_l2_data(iface); + int eth_hdr_len = sizeof(struct net_eth_hdr); struct net_pkt *pkt; struct net_buf *frag; struct net_arp_hdr *hdr, *query; struct net_eth_hdr *eth, *eth_query; - pkt = net_pkt_get_reserve_tx(sizeof(struct net_eth_hdr), - NET_BUF_TIMEOUT); +#if defined(CONFIG_NET_VLAN) + if (ctx->vlan_enabled && + net_eth_get_vlan_tag(iface) != NET_VLAN_TAG_UNSPEC) { + eth_hdr_len = sizeof(struct net_eth_vlan_hdr); + } +#endif + + pkt = net_pkt_get_reserve_tx(eth_hdr_len, NET_BUF_TIMEOUT); if (!pkt) { goto fail; } @@ -393,12 +401,13 @@ static inline struct net_pkt *prepare_arp_reply(struct net_if *iface, query = NET_ARP_HDR(req); 
eth_query = NET_ETH_HDR(req); - eth->type = htons(NET_ETH_PTYPE_ARP); +#if defined(CONFIG_NET_VLAN) + net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(req)); +#endif - memcpy(ð->dst.addr, ð_query->src.addr, - sizeof(struct net_eth_addr)); - memcpy(ð->src.addr, net_if_get_link_addr(iface)->addr, - sizeof(struct net_eth_addr)); + net_eth_fill_header(ctx, pkt, frag, htons(NET_ETH_PTYPE_ARP), + net_if_get_link_addr(iface)->addr, + eth_query->src.addr); hdr->hwtype = htons(NET_ARP_HTYPE_ETH); hdr->protocol = htons(NET_ETH_PTYPE_IP); diff --git a/subsys/net/ip/l2/ethernet.c b/subsys/net/ip/l2/ethernet.c index 5896382412a2e..38ab719814725 100644 --- a/subsys/net/ip/l2/ethernet.c +++ b/subsys/net/ip/l2/ethernet.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "net_private.h" #include "ipv6.h" @@ -46,21 +47,40 @@ void net_eth_ipv6_mcast_to_mac_addr(const struct in6_addr *ipv6_addr, } #if defined(CONFIG_NET_DEBUG_L2_ETHERNET) -#define print_ll_addrs(pkt, type, len) \ +#define print_ll_addrs(pkt, type, len, src, dst) \ do { \ char out[sizeof("xx:xx:xx:xx:xx:xx")]; \ \ snprintk(out, sizeof(out), "%s", \ - net_sprint_ll_addr(net_pkt_ll_src(pkt)->addr, \ + net_sprint_ll_addr((src)->addr, \ sizeof(struct net_eth_addr))); \ \ - NET_DBG("src %s dst %s type 0x%x len %zu", out, \ - net_sprint_ll_addr(net_pkt_ll_dst(pkt)->addr, \ + NET_DBG("iface %p src %s dst %s type 0x%x len %zu", \ + net_pkt_iface(pkt), out, \ + net_sprint_ll_addr((dst)->addr, \ sizeof(struct net_eth_addr)), \ type, (size_t)len); \ } while (0) + +#define print_vlan_ll_addrs(pkt, type, tci, len, src, dst) \ + do { \ + char out[sizeof("xx:xx:xx:xx:xx:xx")]; \ + \ + snprintk(out, sizeof(out), "%s", \ + net_sprint_ll_addr((src)->addr, \ + sizeof(struct net_eth_addr))); \ + \ + NET_DBG("iface %p src %s dst %s type 0x%x tag %d pri %d " \ + "len %zu", \ + net_pkt_iface(pkt), out, \ + net_sprint_ll_addr((dst)->addr, \ + sizeof(struct net_eth_addr)), \ + type, net_eth_get_vid(tci), net_eth_get_pcp(tci), \ + 
(size_t)len); \ + } while (0) #else #define print_ll_addrs(...) +#define print_vlan_ll_addrs(...) #endif /* CONFIG_NET_DEBUG_L2_ETHERNET */ static inline void ethernet_update_length(struct net_if *iface, @@ -100,11 +120,31 @@ static inline void ethernet_update_length(struct net_if *iface, static enum net_verdict ethernet_recv(struct net_if *iface, struct net_pkt *pkt) { +#if defined(CONFIG_NET_VLAN) + struct net_eth_vlan_hdr *hdr_vlan = + (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + struct ethernet_context *ctx = net_if_l2_data(iface); + bool vlan_enabled = false; +#endif struct net_eth_hdr *hdr = NET_ETH_HDR(pkt); struct net_linkaddr *lladdr; sa_family_t family; + u16_t type = ntohs(hdr->type); + u8_t hdr_len = sizeof(struct net_eth_hdr); + +#if defined(CONFIG_NET_VLAN) + if (net_eth_is_vlan_enabled(ctx, iface)) { + if (type == NET_ETH_PTYPE_VLAN) { + net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci)); + type = ntohs(hdr_vlan->type); + hdr_len = sizeof(struct net_eth_vlan_hdr); + } + + vlan_enabled = true; + } +#endif - switch (ntohs(hdr->type)) { + switch (type) { case NET_ETH_PTYPE_IP: case NET_ETH_PTYPE_ARP: net_pkt_set_family(pkt, AF_INET); @@ -114,8 +154,13 @@ static enum net_verdict ethernet_recv(struct net_if *iface, net_pkt_set_family(pkt, AF_INET6); family = AF_INET6; break; +#if defined(CONFIG_NET_GPTP) + case NET_ETH_PTYPE_PTP: + family = AF_UNSPEC; + break; +#endif default: - NET_DBG("Unknown hdr type 0x%04x", hdr->type); + NET_DBG("Unknown hdr type 0x%04x iface %p", type, iface); return NET_DROP; } @@ -130,10 +175,22 @@ static enum net_verdict ethernet_recv(struct net_if *iface, lladdr->len = sizeof(struct net_eth_addr); lladdr->type = NET_LINK_ETHERNET; - print_ll_addrs(pkt, ntohs(hdr->type), net_pkt_get_len(pkt)); +#if defined(CONFIG_NET_VLAN) + if (vlan_enabled) { + print_vlan_ll_addrs(pkt, type, ntohs(hdr_vlan->vlan.tci), + net_pkt_get_len(pkt), + net_pkt_ll_src(pkt), net_pkt_ll_dst(pkt)); + } else +#endif + { + print_ll_addrs(pkt, type, 
net_pkt_get_len(pkt), + net_pkt_ll_src(pkt), net_pkt_ll_dst(pkt)); + } if (!net_eth_is_addr_broadcast((struct net_eth_addr *)lladdr->addr) && !net_eth_is_addr_multicast((struct net_eth_addr *)lladdr->addr) && + !net_eth_is_addr_lldp_multicast( + (struct net_eth_addr *)lladdr->addr) && !net_linkaddr_cmp(net_if_get_link_addr(iface), lladdr)) { /* The ethernet frame is not for me as the link addresses * are different. @@ -145,17 +202,24 @@ static enum net_verdict ethernet_recv(struct net_if *iface, return NET_DROP; } - net_pkt_set_ll_reserve(pkt, sizeof(struct net_eth_hdr)); + net_pkt_set_ll_reserve(pkt, hdr_len); net_buf_pull(pkt->frags, net_pkt_ll_reserve(pkt)); #ifdef CONFIG_NET_ARP - if (family == AF_INET && hdr->type == htons(NET_ETH_PTYPE_ARP)) { + if (family == AF_INET && type == NET_ETH_PTYPE_ARP) { NET_DBG("ARP packet from %s received", net_sprint_ll_addr((u8_t *)hdr->src.addr, sizeof(struct net_eth_addr))); return net_arp_input(pkt); } #endif + +#if defined(CONFIG_NET_GPTP) + if (type == NET_ETH_PTYPE_PTP) { + return net_gptp_recv(iface, pkt); + } +#endif + ethernet_update_length(iface, pkt); return NET_CONTINUE; @@ -196,10 +260,139 @@ static inline bool check_if_dst_is_broadcast_or_mcast(struct net_if *iface, return false; } +#if defined(CONFIG_NET_VLAN) +static enum net_verdict set_vlan_tag(struct ethernet_context *ctx, + struct net_if *iface, + struct net_pkt *pkt) +{ + int i; + + if (net_pkt_vlan_tag(pkt) != NET_VLAN_TAG_UNSPEC) { + return NET_OK; + } + +#if defined(CONFIG_NET_IPV6) + if (net_pkt_family(pkt) == AF_INET6) { + struct net_if *target; + + if (net_if_ipv6_addr_lookup(&NET_IPV6_HDR(pkt)->src, + &target)) { + if (target != iface) { + NET_DBG("Iface %p should be %p", iface, + target); + + iface = target; + } + } + } +#endif + +#if defined(CONFIG_NET_IPV4) + if (net_pkt_family(pkt) == AF_INET) { + struct net_if *target; + + if (net_if_ipv4_addr_lookup(&NET_IPV4_HDR(pkt)->src, + &target)) { + if (target != iface) { + NET_DBG("Iface %p should be 
%p", iface, + target); + iface = target; + } + } + } +#endif + + for (i = 0; i < CONFIG_NET_VLAN_COUNT; i++) { + if (ctx->vlan[i].tag == NET_VLAN_TAG_UNSPEC || + ctx->vlan[i].iface != iface) { + continue; + } + + /* Depending on source address, use the proper network + * interface when sending. + */ + net_pkt_set_vlan_tag(pkt, ctx->vlan[i].tag); + + return NET_OK; + } + + return NET_DROP; +} + +static void set_vlan_priority(struct ethernet_context *ctx, + struct net_pkt *pkt) +{ + /* FIXME: Currently just convert packet priority to VLAN + * priority. This needs to be fixed as VLAN priority is not necessarily + * the same as packet priority. + */ + net_pkt_set_vlan_priority(pkt, net_pkt_priority(pkt)); +} +#endif /* CONFIG_NET_VLAN */ + +struct net_eth_hdr *net_eth_fill_header(struct ethernet_context *ctx, + struct net_pkt *pkt, + struct net_buf *frag, + u32_t ptype, + u8_t *src, + u8_t *dst) +{ + struct net_eth_hdr *hdr; + + NET_ASSERT(net_buf_headroom(frag) > + sizeof(struct net_eth_addr)); + +#if defined(CONFIG_NET_VLAN) + if (net_eth_is_vlan_enabled(ctx, net_pkt_iface(pkt))) { + struct net_eth_vlan_hdr *hdr_vlan; + + hdr_vlan = (struct net_eth_vlan_hdr *)(frag->data - + net_pkt_ll_reserve(pkt)); + + if (dst && ((u8_t *)&hdr_vlan->dst != dst)) { + memcpy(&hdr_vlan->dst, dst, + sizeof(struct net_eth_addr)); + } + + if (src && ((u8_t *)&hdr_vlan->src != src)) { + memcpy(&hdr_vlan->src, src, + sizeof(struct net_eth_addr)); + } + + hdr_vlan->type = ptype; + hdr_vlan->vlan.tpid = htons(NET_ETH_PTYPE_VLAN); + hdr_vlan->vlan.tci = htons(net_pkt_vlan_tci(pkt)); + + print_vlan_ll_addrs(pkt, ntohs(hdr_vlan->type), + net_pkt_vlan_tci(pkt), + frag->len, + &hdr_vlan->src, &hdr_vlan->dst); + + return (struct net_eth_hdr *)hdr_vlan; + } +#endif + + hdr = (struct net_eth_hdr *)(frag->data - net_pkt_ll_reserve(pkt)); + + if (dst && ((u8_t *)&hdr->dst != dst)) { + memcpy(&hdr->dst, dst, sizeof(struct net_eth_addr)); + } + + if (src && ((u8_t *)&hdr->src != src)) { + 
memcpy(&hdr->src, src, sizeof(struct net_eth_addr)); + } + + hdr->type = ptype; + + print_ll_addrs(pkt, ntohs(hdr->type), frag->len, &hdr->src, &hdr->dst); + + return hdr; +} + static enum net_verdict ethernet_send(struct net_if *iface, struct net_pkt *pkt) { - struct net_eth_hdr *hdr = NET_ETH_HDR(pkt); + struct ethernet_context *ctx = net_if_l2_data(iface); struct net_buf *frag; u16_t ptype; @@ -305,6 +498,16 @@ static enum net_verdict ethernet_send(struct net_if *iface, ptype = htons(NET_ETH_PTYPE_IPV6); } +#if defined(CONFIG_NET_VLAN) + if (net_eth_is_vlan_enabled(ctx, iface)) { + if (set_vlan_tag(ctx, iface, pkt) == NET_DROP) { + return NET_DROP; + } + + set_vlan_priority(ctx, pkt); + } +#endif /* CONFIG_NET_VLAN */ + /* Then go through the fragments and set the ethernet header. */ frag = pkt->frags; @@ -312,22 +515,9 @@ static enum net_verdict ethernet_send(struct net_if *iface, NET_ASSERT_INFO(frag, "No data!"); while (frag) { - NET_ASSERT(net_buf_headroom(frag) > sizeof(struct net_eth_addr)); - - hdr = (struct net_eth_hdr *)(frag->data - - net_pkt_ll_reserve(pkt)); - if ((u8_t *)&hdr->dst != net_pkt_ll_dst(pkt)->addr) { - memcpy(&hdr->dst, net_pkt_ll_dst(pkt)->addr, - sizeof(struct net_eth_addr)); - } - - if ((u8_t *)&hdr->src != net_pkt_ll_src(pkt)->addr) { - memcpy(&hdr->src, net_pkt_ll_src(pkt)->addr, - sizeof(struct net_eth_addr)); - } - - hdr->type = ptype; - print_ll_addrs(pkt, ntohs(hdr->type), frag->len); + net_eth_fill_header(ctx, pkt, frag, ptype, + net_pkt_ll_src(pkt)->addr, + net_pkt_ll_dst(pkt)->addr); frag = frag->frags; } @@ -343,7 +533,14 @@ static enum net_verdict ethernet_send(struct net_if *iface, static inline u16_t ethernet_reserve(struct net_if *iface, void *unused) { - ARG_UNUSED(iface); +#if defined(CONFIG_NET_VLAN) + struct ethernet_context *ctx = net_if_l2_data(iface); + + if (net_eth_is_vlan_enabled(ctx, iface)) { + return sizeof(struct net_eth_vlan_hdr); + } +#endif + ARG_UNUSED(unused); return sizeof(struct net_eth_hdr); @@ 
-360,5 +557,236 @@ static inline int ethernet_enable(struct net_if *iface, bool state) return 0; } +#if defined(CONFIG_NET_VLAN) +struct net_if *net_eth_get_vlan_iface(struct ethernet_context *ctx, u16_t tag) +{ + int i; + + for (i = 0; i < CONFIG_NET_VLAN_COUNT; i++) { + if (ctx->vlan[i].tag == NET_VLAN_TAG_UNSPEC || + ctx->vlan[i].tag != tag) { + continue; + } + + NET_DBG("[%d] vlan tag %d -> iface %p", i, tag, + ctx->vlan[i].iface); + + return ctx->vlan[i].iface; + } + + return NULL; +} + +static bool enable_vlan_iface(struct ethernet_context *ctx, + struct net_if *iface) +{ + int iface_idx = net_if_get_by_iface(iface); + + if (iface_idx < 0) { + return false; + } + + atomic_set_bit(ctx->interfaces, iface_idx); + + return true; +} + +static bool disable_vlan_iface(struct ethernet_context *ctx, + struct net_if *iface) +{ + int iface_idx = net_if_get_by_iface(iface); + + if (iface_idx < 0) { + return false; + } + + atomic_clear_bit(ctx->interfaces, iface_idx); + + return true; +} + +static bool is_vlan_enabled_for_iface(struct ethernet_context *ctx, + struct net_if *iface) +{ + int iface_idx = net_if_get_by_iface(iface); + + if (iface_idx < 0) { + return false; + } + + return !!atomic_test_bit(ctx->interfaces, iface_idx); +} + +bool net_eth_is_vlan_enabled(struct ethernet_context *ctx, + struct net_if *iface) +{ + if (ctx->vlan_enabled) { + if (ctx->vlan_enabled == NET_VLAN_MAX_COUNT) { + /* All network interface are using VLAN, no need + * to check further. 
+ */ + return true; + } + + if (is_vlan_enabled_for_iface(ctx, iface)) { + return true; + } + } + + return false; +} + +u16_t net_eth_get_vlan_tag(struct net_if *iface) +{ + struct ethernet_context *ctx = net_if_l2_data(iface); + int i; + + for (i = 0; i < CONFIG_NET_VLAN_COUNT; i++) { + if (ctx->vlan[i].iface == iface) { + return ctx->vlan[i].tag; + } + } + + return NET_VLAN_TAG_UNSPEC; +} + +static struct ethernet_vlan *get_vlan(struct ethernet_context *ctx, + struct net_if *iface, + u16_t vlan_tag) +{ + int i; + + for (i = 0; i < CONFIG_NET_VLAN_COUNT; i++) { + if (ctx->vlan[i].iface == iface && + ctx->vlan[i].tag == vlan_tag) { + return &ctx->vlan[i]; + } + } + + return NULL; +} + +int net_eth_vlan_enable(struct net_if *iface, u16_t tag) +{ + struct ethernet_context *ctx = net_if_l2_data(iface); + const struct ethernet_api *eth = + net_if_get_device(iface)->driver_api; + struct ethernet_vlan *vlan; + int i; + + if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) { + return -EINVAL; + } + + if (!ctx->is_init) { + return -EPERM; + } + + if (tag == NET_VLAN_TAG_UNSPEC) { + return -EBADF; + } + + vlan = get_vlan(ctx, iface, tag); + if (vlan) { + return -EALREADY; + } + + for (i = 0; i < CONFIG_NET_VLAN_COUNT; i++) { + if (ctx->vlan[i].iface != iface) { + continue; + } + + if (ctx->vlan[i].tag != NET_VLAN_TAG_UNSPEC) { + continue; + } + + NET_DBG("[%d] Adding vlan tag %d to iface %p", i, tag, iface); + + ctx->vlan[i].tag = tag; + + enable_vlan_iface(ctx, iface); + + if (eth->vlan_setup) { + eth->vlan_setup(iface, tag, true); + } + + ctx->vlan_enabled++; + if (ctx->vlan_enabled > NET_VLAN_MAX_COUNT) { + ctx->vlan_enabled = NET_VLAN_MAX_COUNT; + } + + return 0; + } + + return -ENOSPC; +} + +int net_eth_vlan_disable(struct net_if *iface, u16_t tag) +{ + struct ethernet_context *ctx = net_if_l2_data(iface); + const struct ethernet_api *eth = + net_if_get_device(iface)->driver_api; + struct ethernet_vlan *vlan; + + if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) { + 
return -EINVAL; + } + + if (tag == NET_VLAN_TAG_UNSPEC) { + return -EBADF; + } + + vlan = get_vlan(ctx, iface, tag); + if (!vlan) { + return -ESRCH; + } + + NET_DBG("Removing vlan tag %d from iface %p", vlan->tag, vlan->iface); + + vlan->tag = NET_VLAN_TAG_UNSPEC; + + disable_vlan_iface(ctx, iface); + + if (eth->vlan_setup) { + eth->vlan_setup(iface, tag, false); + } + + ctx->vlan_enabled--; + if (ctx->vlan_enabled < 0) { + ctx->vlan_enabled = 0; + } + + return 0; +} +#endif + NET_L2_INIT(ETHERNET_L2, ethernet_recv, ethernet_send, ethernet_reserve, ethernet_enable); + +void ethernet_init(struct net_if *iface) +{ + struct ethernet_context *ctx = net_if_l2_data(iface); + int i; + + NET_DBG("Initializing Ethernet L2 %p for iface %p", ctx, iface); + +#if defined(CONFIG_NET_VLAN) + for (i = 0; i < CONFIG_NET_VLAN_COUNT; i++) { + if (!ctx->vlan[i].iface) { + NET_DBG("[%d] alloc ctx %p iface %p", i, ctx, iface); + ctx->vlan[i].tag = NET_VLAN_TAG_UNSPEC; + ctx->vlan[i].iface = iface; + + if (!ctx->is_init) { + atomic_clear(ctx->interfaces); + } + + break; + } + } +#else + ARG_UNUSED(i); +#endif + + ctx->is_init = true; +} diff --git a/subsys/net/ip/l2/gptp/CMakeLists.txt b/subsys/net/ip/l2/gptp/CMakeLists.txt new file mode 100644 index 0000000000000..0575afe577eda --- /dev/null +++ b/subsys/net/ip/l2/gptp/CMakeLists.txt @@ -0,0 +1,13 @@ +zephyr_library() +zephyr_library_include_directories(. ../..) +zephyr_library_compile_definitions_ifdef( + CONFIG_NEWLIB_LIBC __LINUX_ERRNO_EXTENSIONS__ + ) + +zephyr_library_sources( + gptp.c + gptp_iface.c + gptp_md.c + gptp_messages.c + gptp_mi.c + ) diff --git a/subsys/net/ip/l2/gptp/Kconfig b/subsys/net/ip/l2/gptp/Kconfig new file mode 100644 index 0000000000000..e03c32ee82faf --- /dev/null +++ b/subsys/net/ip/l2/gptp/Kconfig @@ -0,0 +1,96 @@ +# +# Copyright (c) 2018 Intel Corporation. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +menuconfig NET_GPTP + bool "Enable IEEE 802.1AS (gPTP) support [EXPERIMENTAL]" + depends on NET_L2_ETHERNET + select NET_PKT_TIMESTAMP + select PTP_CLOCK + default n + help + Enable gPTP driver that send and receives gPTP packets + and handles network packet timestamps. + +if NET_GPTP + +config NET_DEBUG_GPTP + bool "Enable Debug Information for gPTP" + default n + depends on NET_LOG + help + Enable logs for the gPTP stack. + +config NET_GPTP_NUM_PORTS + int "Number of gPTP ports" + default 1 + help + Configures the gPTP stack to work with the given number of ports. + The port concept is the same thing as network interface. + +config NET_GPTP_NEIGHBOR_PROP_DELAY_THR + int "Set neighbor propagation delay threshold (ns)" + default 100000 + help + Defines the neighbor propagation delay threshold in nanoseconds. + This is the propagation time threshold, above which a port is not + considered capable of participating in the IEEE 802.1AS protocol. + See IEEE 802.1AS chapter 11.2.12.6 for details. + +config NET_GPTP_INIT_LOG_PDELAY_REQ_ITV + int "Set initial pdelay request interval in Log2 base" + default 0 + help + Defines the interval at which a Path Delay Request will be sent. + The value is the converted in nanoseconds as follow: + nanoseconds = (10^9) * 2^(16 + value) + +config NET_GPTP_INIT_LOG_SYNC_ITV + int "Set initial sync interval in Log2 base" + default -3 + help + Defines the interval at which a Sync message will be sent. + The value is the converted in nanoseconds as follow: + nanoseconds = (10^9) * 2^(16 + value) + +config NET_GPTP_INIT_LOG_ANNOUNCE_ITV + int "Set initial announce interval in Log2 base" + default 0 + help + Defines the interval at which an Announce message will be sent. 
+ The value is the converted in nanoseconds as follow: + nanoseconds = (10^9) * 2^(16 + value) + +config NET_GPTP_SYNC_RECEIPT_TIMEOUT + int "Number of sync intervals to wait" + default 3 + help + Defines the number of sync intervals to wait without receiving + synchronization information before assuming that the master is no + longer transmitting synchronization information. + +config NET_GPTP_ANNOUNCE_RECEIPT_TIMEOUT + int "Number of announce intervals to wait" + default 3 + help + Defines the number of announce intervals to wait without receiving + an Announce message before assuming that the master is no longer + transmitting Announce messages. + +config NET_GPTP_USE_DEFAULT_CLOCK_UPDATE + bool "Use a default clock update function" + default y + help + Use a default internal function to update port local clock. + This method may not be accurate. + +config NET_GPTP_STATISTICS + bool "Collect gPTP statistics" + default n + help + Enable this if you need to collect gPTP statistics. The statistics + can be seen in net-shell if needed. + +endif diff --git a/subsys/net/ip/l2/gptp/gptp.c b/subsys/net/ip/l2/gptp/gptp.c new file mode 100644 index 0000000000000..89aba5d00f42f --- /dev/null +++ b/subsys/net/ip/l2/gptp/gptp.c @@ -0,0 +1,885 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_GPTP) +#define SYS_LOG_DOMAIN "net/gptp" +#define NET_LOG_ENABLED 1 +#endif + +#include +#include +#include +#include +#include +#include + +#include "gptp_private.h" + +#if !defined(CONFIG_NET_GPTP_STACK_SIZE) +#define CONFIG_NET_GPTP_STACK_SIZE 2048 +#endif + +#if CONFIG_NET_GPTP_NUM_PORTS > 32 +/* + * Boolean arrays sizes have been hardcoded. + * It has been arbitrary chosen that a system can not + * have more than 32 ports. + */ +#error Maximum number of ports exceeded. (Max is 32). 
+#endif + +NET_STACK_DEFINE(GPTP, gptp_stack, CONFIG_NET_GPTP_STACK_SIZE, + CONFIG_NET_GPTP_STACK_SIZE); +K_FIFO_DEFINE(gptp_rx_queue); + +static struct k_thread gptp_thread_data; +struct gptp_domain gptp_domain; + +int gptp_get_port_number(struct net_if *iface) +{ + int port; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + if (GPTP_PORT_IFACE(port) == iface) { + return port; + } + } + + return -ENODEV; +} + +bool gptp_is_slave_port(int port) +{ + return (GPTP_GLOBAL_DS()->selected_role[port] == GPTP_PORT_SLAVE); +} + +/* + * Use the given port to generate the clock identity + * for the device. + * The clock identity is unique for one time-aware system. + */ +static void gptp_compute_clock_identity(int port) +{ + struct device *dev = GPTP_PORT_DRV(port); + struct net_if *iface = net_if_lookup_by_dev(dev); + struct gptp_default_ds *default_ds; + + default_ds = GPTP_DEFAULT_DS(); + + if (iface) { + default_ds->clk_id[0] = net_if_get_link_addr(iface)->addr[0]; + default_ds->clk_id[1] = net_if_get_link_addr(iface)->addr[1]; + default_ds->clk_id[2] = net_if_get_link_addr(iface)->addr[2]; + default_ds->clk_id[3] = 0xFF; + default_ds->clk_id[4] = 0xFE; + default_ds->clk_id[5] = net_if_get_link_addr(iface)->addr[3]; + default_ds->clk_id[6] = net_if_get_link_addr(iface)->addr[4]; + default_ds->clk_id[7] = net_if_get_link_addr(iface)->addr[5]; + } +} + +#define PRINT_INFO(msg, hdr, pkt) \ + NET_DBG("Received %s seq %d pkt %p", msg, \ + hdr->sequence_id, pkt) \ + + +static bool gptp_handle_critical_msg(struct net_if *iface, struct net_pkt *pkt) +{ + struct gptp_hdr *hdr = GPTP_HDR(pkt); + struct gptp_pdelay_resp_state *pdelay_resp_state; + int port; + bool handled = false; + + port = gptp_get_port_number(iface); + if (port == -ENODEV) { + NET_DBG("No port found for gPTP buffer"); + return handled; + } + + pdelay_resp_state = &GPTP_PORT_STATE(port)->pdelay_resp; + + switch (hdr->message_type) { + case GPTP_PATH_DELAY_REQ_MESSAGE: + if (GPTP_CHECK_LEN(pkt, 
GPTP_PDELAY_REQ_LEN)) { + NET_WARN("Invalid length for %s packet " + "should have %d bytes but has %d bytes", + "PDELAY_REQUEST", + GPTP_PDELAY_REQ_LEN, + GPTP_PACKET_LEN(pkt)); + break; + } + + PRINT_INFO("PDELAY_REQUEST", hdr, pkt); + + if (pdelay_resp_state->state != GPTP_PDELAY_RESP_NOT_ENABLED) { + gptp_handle_pdelay_req(port, pkt); + } + + handled = true; + break; + default: + /* Not a critical message, this will be handled later. */ + break; + } + + return handled; +} + +static void gptp_handle_msg(struct net_pkt *pkt) +{ + struct gptp_pdelay_req_state *pdelay_req_state; + struct gptp_sync_rcv_state *sync_rcv_state; + struct gptp_port_announce_receive_state *pa_rcv_state; + struct gptp_port_bmca_data *bmca_data; + struct gptp_hdr *hdr = GPTP_HDR(pkt); + int port; + + port = gptp_get_port_number(net_pkt_iface(pkt)); + if (port == -ENODEV) { + NET_DBG("No port found for ptp buffer"); + return; + } + + pdelay_req_state = &GPTP_PORT_STATE(port)->pdelay_req; + sync_rcv_state = &GPTP_PORT_STATE(port)->sync_rcv; + + net_if_update_rx_timestamp_stats(pkt); + + switch (hdr->message_type) { + case GPTP_SYNC_MESSAGE: + if (GPTP_CHECK_LEN(pkt, GPTP_SYNC_LEN)) { + NET_WARN("Invalid length for %s packet " + "should have %d bytes but has %d bytes", + "SYNC", + GPTP_SYNC_LEN, + GPTP_PACKET_LEN(pkt)); + GPTP_STATS_INC(port, rx_ptp_packet_discard_count); + break; + } + + PRINT_INFO("SYNC", hdr, pkt); + + sync_rcv_state->rcvd_sync = true; + + /* If we already have one, drop the previous one. */ + if (sync_rcv_state->rcvd_sync_ptr) { + net_pkt_unref(sync_rcv_state->rcvd_sync_ptr); + } + + /* Keep the buffer alive until follow_up is received. */ + net_pkt_ref(pkt); + sync_rcv_state->rcvd_sync_ptr = pkt; + + GPTP_STATS_INC(port, rx_sync_count); + break; + + case GPTP_DELAY_REQ_MESSAGE: + NET_DBG("Delay Request not handled."); + break; + + case GPTP_PATH_DELAY_REQ_MESSAGE: + /* + * Path Delay Responses to Path Delay Requests need + * very low latency. 
These need to handled in priority + * when received as they cannot afford to be delayed + * by context switches. + */ + NET_WARN("Path Delay Request received as normal messages!"); + GPTP_STATS_INC(port, rx_ptp_packet_discard_count); + break; + + case GPTP_PATH_DELAY_RESP_MESSAGE: + if (GPTP_CHECK_LEN(pkt, GPTP_PDELAY_RESP_LEN)) { + NET_WARN("Invalid length for %s packet " + "should have %d bytes but has %d bytes", + "PATH_DELAY_RESP", + GPTP_PDELAY_RESP_LEN, + GPTP_PACKET_LEN(pkt)); + GPTP_STATS_INC(port, rx_ptp_packet_discard_count); + break; + } + + PRINT_INFO("PATH_DELAY_RESP", hdr, pkt); + + pdelay_req_state->rcvd_pdelay_resp++; + + /* If we already have one, drop the received one. */ + if (pdelay_req_state->rcvd_pdelay_resp_ptr) { + break; + } + + /* Keep the buffer alive until pdelay_rate_ratio is computed. */ + net_pkt_ref(pkt); + pdelay_req_state->rcvd_pdelay_resp_ptr = pkt; + break; + + case GPTP_FOLLOWUP_MESSAGE: + if (GPTP_CHECK_LEN(pkt, GPTP_FOLLOW_UP_LEN)) { + NET_WARN("Invalid length for %s packet " + "should have %d bytes but has %d bytes", + "FOLLOWUP", + GPTP_FOLLOW_UP_LEN, + GPTP_PACKET_LEN(pkt)); + GPTP_STATS_INC(port, rx_ptp_packet_discard_count); + break; + } + + PRINT_INFO("FOLLOWUP", hdr, pkt); + + sync_rcv_state->rcvd_follow_up = true; + + /* If we already have one, drop the previous one. */ + if (sync_rcv_state->rcvd_follow_up_ptr) { + net_pkt_unref(sync_rcv_state->rcvd_follow_up_ptr); + } + + /* Keep the pkt alive until info is extracted. 
*/ + sync_rcv_state->rcvd_follow_up_ptr = net_pkt_ref(pkt); + NET_DBG("Keeping follow-up seq %d msg %p", hdr->sequence_id, + pkt); + break; + + case GPTP_PATH_DELAY_FOLLOWUP_MESSAGE: + if (GPTP_CHECK_LEN(pkt, GPTP_PDELAY_RESP_FUP_LEN)) { + NET_WARN("Invalid length for %s packet " + "should have %d bytes but has %d bytes", + "PATH_DELAY_FOLLOWUP", + GPTP_PDELAY_RESP_FUP_LEN, + GPTP_PACKET_LEN(pkt)); + GPTP_STATS_INC(port, rx_ptp_packet_discard_count); + break; + } + + PRINT_INFO("PATH_DELAY_FOLLOWUP", hdr, pkt); + + pdelay_req_state->rcvd_pdelay_follow_up++; + + /* If we already have one, drop the received one. */ + if (pdelay_req_state->rcvd_pdelay_follow_up_ptr) { + break; + } + + /* Keep the buffer alive until pdelay_rate_ratio is computed. */ + net_pkt_ref(pkt); + pdelay_req_state->rcvd_pdelay_follow_up_ptr = pkt; + + GPTP_STATS_INC(port, rx_pdelay_resp_fup_count); + break; + + case GPTP_ANNOUNCE_MESSAGE: + if (GPTP_ANNOUNCE_CHECK_LEN(pkt)) { + NET_WARN("Invalid length for %s packet " + "should have %d bytes but has %d bytes", + "ANNOUNCE", + GPTP_ANNOUNCE_LEN(pkt), + GPTP_PACKET_LEN(pkt)); + GPTP_STATS_INC(port, rx_ptp_packet_discard_count); + break; + } + + PRINT_INFO("ANNOUNCE", hdr, pkt); + + pa_rcv_state = &GPTP_PORT_STATE(port)->pa_rcv; + bmca_data = GPTP_PORT_BMCA_DATA(port); + if (pa_rcv_state->rcvd_announce == false && + bmca_data->rcvd_announce_ptr == NULL) { + pa_rcv_state->rcvd_announce = true; + bmca_data->rcvd_announce_ptr = pkt; + net_pkt_ref(pkt); + } + + GPTP_STATS_INC(port, rx_announce_count); + break; + + case GPTP_SIGNALING_MESSAGE: + if (GPTP_CHECK_LEN(pkt, GPTP_SIGNALING_LEN)) { + NET_WARN("Invalid length for %s packet " + "should have %d bytes but has %d bytes", + "SIGNALING", + GPTP_SIGNALING_LEN, + GPTP_PACKET_LEN(pkt)); + GPTP_STATS_INC(port, rx_ptp_packet_discard_count); + break; + } + + PRINT_INFO("SIGNALING", hdr, pkt); + + gptp_handle_signaling(port, pkt); + break; + + case GPTP_MANAGEMENT_MESSAGE: + PRINT_INFO("MANAGEMENT", hdr, 
pkt); + GPTP_STATS_INC(port, rx_ptp_packet_discard_count); + break; + + default: + NET_DBG("Received unknown message %x", hdr->message_type); + GPTP_STATS_INC(port, rx_ptp_packet_discard_count); + break; + } +} + +enum net_verdict net_gptp_recv(struct net_if *iface, struct net_pkt *pkt) +{ + struct gptp_hdr *hdr = GPTP_HDR(pkt); + + if ((hdr->ptp_version != GPTP_VERSION) || + (hdr->transport_specific != GPTP_TRANSPORT_802_1_AS)) { + /* The Stack only supports PTP V2 and transportSpecific set to 1 + * with IEEE802.1AS-2011. + */ + goto drop; + } + + /* Handle critical messages. */ + if (!gptp_handle_critical_msg(iface, pkt)) { + k_fifo_put(&gptp_rx_queue, pkt); + + /* Returning OK here makes sure the network statistics are + * properly updated. + */ + return NET_OK; + } + +drop: + /* Message not propagated up in the stack. */ + return NET_DROP; +} + +static void gptp_init_clock_ds(void) +{ + struct gptp_global_ds *global_ds; + struct gptp_default_ds *default_ds; + struct gptp_current_ds *current_ds; + struct gptp_parent_ds *parent_ds; + struct gptp_time_prop_ds *prop_ds; + + global_ds = GPTP_GLOBAL_DS(); + default_ds = GPTP_DEFAULT_DS(); + current_ds = GPTP_CURRENT_DS(); + parent_ds = GPTP_PARENT_DS(); + prop_ds = GPTP_PROPERTIES_DS(); + + /* Initialize global data set. */ + memset(global_ds, 0, sizeof(struct gptp_global_ds)); + + /* Initialize default data set. */ + + /* Compute the clock identity from the first port MAC address. */ + gptp_compute_clock_identity(GPTP_PORT_START); + + /* XXX GrandMaster capability is not supported. 
*/ + default_ds->gm_capable = false; + default_ds->clk_quality.clock_class = GPTP_CLASS_SLAVE_ONLY; + default_ds->clk_quality.clock_accuracy = + GPTP_CLOCK_ACCURACY_UNKNOWN; + default_ds->clk_quality.offset_scaled_log_var = + GPTP_OFFSET_SCALED_LOG_VAR_UNKNOWN; + default_ds->priority1 = GPTP_PRIORITY1_NON_GM_CAPABLE; + default_ds->priority2 = GPTP_PRIORITY2_DEFAULT; + + default_ds->cur_utc_offset = 37; /* Current leap seconds TAI - UTC */ + default_ds->flags.all = 0; + default_ds->flags.time_traceable = true; + default_ds->time_source = GPTP_TS_INTERNAL_OSCILLATOR; + + /* Initialize current data set. */ + memset(current_ds, 0, sizeof(struct gptp_current_ds)); + + /* Initialize parent data set. */ + + /* parent clock id is initialized to default_ds clock id. */ + memcpy(&parent_ds->port_id.clk_id, + &default_ds->clk_id, GPTP_CLOCK_ID_LEN); + memcpy(&parent_ds->gm_id, + &default_ds->clk_id, GPTP_CLOCK_ID_LEN); + parent_ds->port_id.port_number = 0; + + /* TODO: Check correct value for below field. */ + parent_ds->cumulative_rate_ratio = 0; + + parent_ds->gm_clk_quality.clock_class = + default_ds->clk_quality.clock_class; + parent_ds->gm_clk_quality.clock_accuracy = + default_ds->clk_quality.clock_accuracy; + parent_ds->gm_clk_quality.offset_scaled_log_var = + default_ds->clk_quality.offset_scaled_log_var; + parent_ds->gm_priority1 = default_ds->priority1; + parent_ds->gm_priority2 = default_ds->priority2; + + /* Initialize properties data set. */ + + /* TODO: Get accurate values for below. From the GM. */ + prop_ds->cur_utc_offset = 37; /* Current leap seconds TAI - UTC */ + prop_ds->cur_utc_offset_valid = false; + prop_ds->leap59 = false; + prop_ds->leap61 = false; + prop_ds->time_traceable = false; + prop_ds->freq_traceable = false; + prop_ds->time_source = GPTP_TS_INTERNAL_OSCILLATOR; + + /* Set system values. 
*/ + global_ds->sys_flags.all = default_ds->flags.all; + global_ds->sys_current_utc_offset = default_ds->cur_utc_offset; + global_ds->sys_time_source = default_ds->time_source; +} + +static void gptp_init_port_ds(int port) +{ + struct gptp_default_ds *default_ds; + struct gptp_port_ds *port_ds; + +#if defined(CONFIG_NET_GPTP_STATISTICS) + struct gptp_port_param_ds *port_param_ds; + + port_param_ds = GPTP_PORT_PARAM_DS(port); +#endif + + default_ds = GPTP_DEFAULT_DS(); + port_ds = GPTP_PORT_DS(port); + + /* Initialize port data set. */ + memcpy(port_ds->port_id.clk_id, default_ds->clk_id, GPTP_CLOCK_ID_LEN); + port_ds->port_id.port_number = port; + + port_ds->ptt_port_enabled = true; + port_ds->prev_ptt_port_enabled = true; + + port_ds->neighbor_prop_delay = 0; + port_ds->neighbor_prop_delay_thresh = GPTP_NEIGHBOR_PROP_DELAY_THR; + port_ds->delay_asymmetry = 0; + + port_ds->ini_log_announce_itv = CONFIG_NET_GPTP_INIT_LOG_ANNOUNCE_ITV; + port_ds->cur_log_announce_itv = port_ds->ini_log_announce_itv; + port_ds->announce_receipt_timeout = + CONFIG_NET_GPTP_ANNOUNCE_RECEIPT_TIMEOUT; + + /* Substract 1 to divide by 2 the sync interval. */ + port_ds->ini_log_half_sync_itv = CONFIG_NET_GPTP_INIT_LOG_SYNC_ITV - 1; + port_ds->cur_log_half_sync_itv = port_ds->ini_log_half_sync_itv; + port_ds->sync_receipt_timeout = CONFIG_NET_GPTP_SYNC_RECEIPT_TIMEOUT; + port_ds->sync_receipt_timeout_time_itv = 10000000; /* 10ms */ + + port_ds->ini_log_pdelay_req_itv = + CONFIG_NET_GPTP_INIT_LOG_PDELAY_REQ_ITV; + port_ds->cur_log_pdelay_req_itv = port_ds->ini_log_pdelay_req_itv; + port_ds->allowed_lost_responses = GPTP_ALLOWED_LOST_RESP; + port_ds->version = GPTP_VERSION; + + gptp_set_time_itv(&port_ds->pdelay_req_itv, 1, + port_ds->cur_log_pdelay_req_itv); + + gptp_set_time_itv(&port_ds->half_sync_itv, 1, + port_ds->cur_log_half_sync_itv); + + port_ds->compute_neighbor_rate_ratio = true; + port_ds->compute_neighbor_prop_delay = true; + + /* Random Sequence Numbers. 
*/ + port_ds->sync_seq_id = (u16_t)sys_rand32_get(); + port_ds->pdelay_req_seq_id = (u16_t)sys_rand32_get(); + port_ds->announce_seq_id = (u16_t)sys_rand32_get(); + port_ds->signaling_seq_id = (u16_t)sys_rand32_get(); + +#if defined(CONFIG_NET_GPTP_STATISTICS) + /* Initialize stats data set. */ + memset(port_param_ds, 0, sizeof(struct gptp_port_param_ds)); +#endif +} + +static void gptp_init_state_machine(void) +{ + gptp_md_init_state_machine(); + gptp_mi_init_state_machine(); +} + +static void gptp_state_machine(void) +{ + int port; + struct gptp_port_ds *port_ds; + + /* Manage port states. */ + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + port_ds = GPTP_PORT_DS(port); + + switch (GPTP_GLOBAL_DS()->selected_role[port]) { + case GPTP_PORT_DISABLED: + case GPTP_PORT_MASTER: + case GPTP_PORT_PASSIVE: + case GPTP_PORT_SLAVE: + gptp_md_state_machines(port); + gptp_mi_port_sync_state_machines(port); + gptp_mi_port_bmca_state_machines(port); + break; + default: + NET_DBG("%s: Unknown port state", __func__); + break; + } + + port_ds->prev_ptt_port_enabled = port_ds->ptt_port_enabled; + } + + gptp_mi_state_machines(); +} + +static void gptp_thread(void) +{ + int port; + + NET_DBG("Starting PTP thread"); + + gptp_init_clock_ds(); + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + gptp_init_port_ds(port); + GPTP_GLOBAL_DS()->selected_role[port] = GPTP_PORT_DISABLED; + } + + while (1) { + struct net_pkt *pkt; + + pkt = k_fifo_get(&gptp_rx_queue, + K_MSEC(GPTP_THREAD_WAIT_TIMEOUT_MS)); + if (pkt) { + gptp_handle_msg(pkt); + net_pkt_unref(pkt); + } + + gptp_state_machine(); + } +} + + +static void gptp_add_port(struct net_if *iface, void *user_data) +{ + int *num_ports = user_data; + struct ptp_clock *clk; + + if (*num_ports >= CONFIG_NET_GPTP_NUM_PORTS) { + return; + } + + /* Check if interface has a PTP clock. 
*/ + clk = ptp_clock_lookup_by_dev(net_if_get_device(iface)); + if (clk) { + gptp_domain.iface[*num_ports] = iface; + (*num_ports)++; + } +} + +void gptp_set_time_itv(struct gptp_uscaled_ns *interval, + u16_t seconds, + s8_t log_msg_interval) +{ + int i; + + if (seconds == 0) { + interval->low = 0; + interval->high = 0; + return; + } else if (log_msg_interval >= 96) { + /* Overflow, set maximum. */ + interval->low = UINT64_MAX; + interval->high = UINT32_MAX; + + return; + } else if (log_msg_interval <= -64) { + /* Underflow, set to 0. */ + interval->low = 0; + interval->high = 0; + return; + } + + + /* NSEC_PER_SEC is between 2^30 and 2^31, seconds is less thant 2^16, + * thus the computation will be less than 2^63. + */ + interval->low = + (seconds * + (u64_t)NSEC_PER_SEC) << 16; + + if (log_msg_interval <= 0) { + interval->low >>= -log_msg_interval; + interval->high = 0; + } else { + /* Find highest bit set. */ + for (i = 63; i >= 0; i--) { + if (interval->low >> i) { + break; + } + } + + if ((i + log_msg_interval) >= 96) { + /* Overflow, set maximum. */ + interval->low = UINT64_MAX; + interval->high = UINT32_MAX; + } else { + interval->high = + interval->low >> (64 - log_msg_interval); + interval->low <<= log_msg_interval; + } + } +} + +s32_t gptp_uscaled_ns_to_timer_ms(struct gptp_uscaled_ns *usns) +{ + u64_t tmp; + + if (usns->high) { + /* Do not calculate, it reaches max value. */ + return INT32_MAX; + } + + tmp = (usns->low >> 16) / 1000000; + + if (tmp == 0) { + /* Timer must be started with a minimum value of 1. */ + return 1; + } + + if (tmp > INT32_MAX) { + return INT32_MAX; + } + + return (tmp & INT32_MAX); + +} + +static s32_t timer_get_remaining_and_stop(struct k_timer *timer) +{ + int key; + s32_t timer_value; + + key = irq_lock(); + timer_value = k_timer_remaining_get(timer); + + /* Stop timer as the period is about to be modified. 
*/ + k_timer_stop(timer); + irq_unlock(key); + + return timer_value; +} + +static s32_t update_itv(struct gptp_uscaled_ns *itv, + s8_t *cur_log_itv, + s8_t *ini_log_itv, + s8_t new_log_itv, + s8_t correction_log_itv) +{ + switch (new_log_itv) { + case GPTP_ITV_KEEP: + break; + case GPTP_ITV_SET_TO_INIT: + *cur_log_itv = *ini_log_itv; + gptp_set_time_itv(itv, 1, *ini_log_itv); + break; + case GPTP_ITV_STOP: + default: + *cur_log_itv = new_log_itv + correction_log_itv; + gptp_set_time_itv(itv, 1, *cur_log_itv); + break; + } + + return gptp_uscaled_ns_to_timer_ms(itv); +} + +void gptp_update_pdelay_req_interval(int port, s8_t log_val) +{ + s32_t remaining; + s32_t new_itv, old_itv; + struct gptp_pdelay_req_state *state_pdelay; + struct gptp_port_ds *port_ds; + + port_ds = GPTP_PORT_DS(port); + state_pdelay = &GPTP_PORT_STATE(port)->pdelay_req; + remaining = timer_get_remaining_and_stop(&state_pdelay->pdelay_timer); + + old_itv = gptp_uscaled_ns_to_timer_ms(&port_ds->pdelay_req_itv); + new_itv = update_itv(&port_ds->pdelay_req_itv, + &port_ds->cur_log_pdelay_req_itv, + &port_ds->ini_log_pdelay_req_itv, + log_val, + 0); + + new_itv -= (old_itv-remaining); + if (new_itv <= 0) { + new_itv = 1; + } + + k_timer_start(&state_pdelay->pdelay_timer, new_itv, 0); +} + +void gptp_update_sync_interval(int port, s8_t log_val) +{ s32_t remaining; + s32_t new_itv, old_itv, period; + u32_t time_spent; + struct gptp_port_ds *port_ds; + struct gptp_pss_send_state *state_pss_send; + + port_ds = GPTP_PORT_DS(port); + state_pss_send = &GPTP_PORT_STATE(port)->pss_send; + remaining = + timer_get_remaining_and_stop( + &state_pss_send->half_sync_itv_timer); + old_itv = gptp_uscaled_ns_to_timer_ms(&port_ds->half_sync_itv); + new_itv = update_itv(&port_ds->half_sync_itv, + &port_ds->cur_log_half_sync_itv, + &port_ds->ini_log_half_sync_itv, + log_val, + -1); + period = new_itv; + + /* Get the time spent from the start of the timer. 
*/ + time_spent = old_itv; + if (state_pss_send->half_sync_itv_timer_expired) { + time_spent *= 2; + } + time_spent -= remaining; + + /* Calculate remaining time and if half timer has expired. */ + if ((time_spent / 2) > new_itv) { + state_pss_send->sync_itv_timer_expired = true; + state_pss_send->half_sync_itv_timer_expired = true; + new_itv = 1; + } else if (time_spent > new_itv) { + state_pss_send->sync_itv_timer_expired = false; + state_pss_send->half_sync_itv_timer_expired = true; + new_itv -= (time_spent - new_itv); + } else { + state_pss_send->sync_itv_timer_expired = false; + state_pss_send->half_sync_itv_timer_expired = false; + new_itv -= time_spent; + } + + if (new_itv <= 0) { + new_itv = 1; + } + + k_timer_start(&state_pss_send->half_sync_itv_timer, new_itv, period); +} + +void gptp_update_announce_interval(int port, s8_t log_val) +{ + s32_t remaining; + s32_t new_itv, old_itv; + struct gptp_port_announce_transmit_state *state_ann; + struct gptp_port_bmca_data *bmca_data; + struct gptp_port_ds *port_ds; + + port_ds = GPTP_PORT_DS(port); + state_ann = &GPTP_PORT_STATE(port)->pa_transmit; + bmca_data = GPTP_PORT_BMCA_DATA(port); + remaining = timer_get_remaining_and_stop( + &state_ann->ann_send_periodic_timer); + + old_itv = gptp_uscaled_ns_to_timer_ms(&bmca_data->announce_interval); + new_itv = update_itv(&bmca_data->announce_interval, + &port_ds->cur_log_announce_itv, + &port_ds->ini_log_announce_itv, + log_val, + 0); + + new_itv -= (old_itv-remaining); + if (new_itv <= 0) { + new_itv = 1; + } + + k_timer_start(&state_ann->ann_send_periodic_timer, new_itv, 0); +} + +struct port_user_data { + gptp_port_cb_t cb; + void *user_data; +}; + +static void gptp_get_port(struct net_if *iface, void *user_data) +{ + struct port_user_data *ud = user_data; + struct ptp_clock *clk; + + /* Check if interface has a PTP clock. 
*/ + clk = ptp_clock_lookup_by_dev(net_if_get_device(iface)); + if (clk) { + int port = gptp_get_port_number(iface); + + if (port < 0) { + return; + } + + ud->cb(port, iface, ud->user_data); + } +} + +void gptp_foreach_port(gptp_port_cb_t cb, void *user_data) +{ + struct port_user_data ud = { + .cb = cb, + .user_data = user_data + }; + + net_if_foreach(gptp_get_port, &ud); +} + +struct gptp_domain *gptp_get_domain(void) +{ + return &gptp_domain; +} + +int gptp_get_port_data(struct gptp_domain *domain, + int port, + struct gptp_port_ds **port_ds, + struct gptp_port_param_ds **port_param_ds, + struct gptp_port_states **port_state, + struct gptp_port_bmca_data **port_bmca_data, + struct net_if **iface) +{ + if (domain != &gptp_domain) { + return -ENOENT; + } + + if (port < 0 || port > CONFIG_NET_GPTP_NUM_PORTS) { + return -EINVAL; + } + + if (port_ds) { + *port_ds = GPTP_PORT_DS(port); + } + + if (port_param_ds) { +#if defined(CONFIG_NET_GPTP_STATISTICS) + *port_param_ds = GPTP_PORT_PARAM_DS(port); +#else + *port_param_ds = NULL; +#endif + } + + if (port_state) { + *port_state = GPTP_PORT_STATE(port); + } + + if (port_bmca_data) { + *port_bmca_data = GPTP_PORT_BMCA_DATA(port); + } + + if (iface) { + *iface = GPTP_PORT_IFACE(port); + } + + return 0; +} + +void net_gptp_init(void) +{ + gptp_domain.default_ds.nb_ports = 0; + net_if_foreach(gptp_add_port, &gptp_domain.default_ds.nb_ports); + + /* Only initialize the state machine once the ports are known. 
*/ + gptp_init_state_machine(); + + k_thread_create(&gptp_thread_data, gptp_stack, sizeof(gptp_stack), + (k_thread_entry_t)gptp_thread, + NULL, NULL, NULL, K_PRIO_PREEMPT(5), 0, 0); +} diff --git a/subsys/net/ip/l2/gptp/gptp_iface.c b/subsys/net/ip/l2/gptp/gptp_iface.c new file mode 100644 index 0000000000000..85ea0809de016 --- /dev/null +++ b/subsys/net/ip/l2/gptp/gptp_iface.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2017 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_GPTP) +#define SYS_LOG_DOMAIN "net/gptp" +#endif + +#include +#include +#include +#include + +#include "../../net_private.h" + +static sys_slist_t phase_dis_callbacks; + +void gptp_register_phase_dis_cb(struct gptp_phase_dis_cb *phase_dis, + gptp_phase_dis_callback_t cb) +{ + sys_slist_find_and_remove(&phase_dis_callbacks, &phase_dis->node); + sys_slist_prepend(&phase_dis_callbacks, &phase_dis->node); + + phase_dis->cb = cb; +} + +void gptp_unregister_phase_dis_cb(struct gptp_phase_dis_cb *phase_dis) +{ + sys_slist_find_and_remove(&phase_dis_callbacks, &phase_dis->node); +} + +void gptp_call_phase_dis_cb(void) +{ + struct gptp_global_ds *global_ds; + sys_snode_t *sn, *sns; + u8_t *gm_id; + + global_ds = GPTP_GLOBAL_DS(); + gm_id = &global_ds->gm_priority.root_system_id.grand_master_id[0]; + + SYS_SLIST_FOR_EACH_NODE_SAFE(&phase_dis_callbacks, sn, sns) { + struct gptp_phase_dis_cb *phase_dis = + CONTAINER_OF(sn, struct gptp_phase_dis_cb, node); + + phase_dis->cb(gm_id, + &global_ds->gm_time_base_indicator, + &global_ds->clk_src_last_gm_phase_change, + &global_ds->clk_src_last_gm_freq_change); + } +} + +int gptp_event_capture(struct net_ptp_time *slave_time, bool *gm_present) +{ + int port, key; + struct ptp_clock *clk; + + key = irq_lock(); + *gm_present = GPTP_GLOBAL_DS()->gm_present; + + for (port = GPTP_PORT_START; port <= GPTP_PORT_END; port++) { + /* Get first available clock, or slave clock if GM present. 
*/ + if (!*gm_present || + (GPTP_GLOBAL_DS()->selected_role[port] == + GPTP_PORT_SLAVE)) { + clk = ptp_clock_lookup_by_dev(GPTP_PORT_DRV(port)); + if (clk) { + ptp_clock_get(clk, slave_time); + irq_unlock(key); + return 0; + } + } + } + + irq_unlock(key); + return -EAGAIN; +} + +char *gptp_sprint_clock_id(const u8_t *clk_id, char *output, size_t output_len) +{ + return net_sprint_ll_addr_buf(clk_id, 8, output, output_len); +} diff --git a/subsys/net/ip/l2/gptp/gptp_md.c b/subsys/net/ip/l2/gptp/gptp_md.c new file mode 100644 index 0000000000000..62ba250d8a61a --- /dev/null +++ b/subsys/net/ip/l2/gptp/gptp_md.c @@ -0,0 +1,862 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_GPTP) +#define SYS_LOG_DOMAIN "net/gptp" +#define NET_LOG_ENABLED 1 +#endif + +#include +#include +#include + +#include "gptp_private.h" + +static void gptp_md_sync_prepare(struct net_pkt *pkt, + struct gptp_md_sync_info *sync_send) +{ + struct gptp_hdr *hdr; + + hdr = GPTP_HDR(pkt); + + memcpy(&hdr->port_id, &sync_send->src_port_id, + sizeof(struct gptp_port_identity)); + hdr->log_msg_interval = sync_send->log_msg_interval; +} + +static void gptp_md_follow_up_prepare(struct net_pkt *pkt, + struct gptp_md_sync_info *sync_send) +{ + struct gptp_hdr *hdr; + struct gptp_follow_up *fup; + + hdr = GPTP_HDR(pkt); + fup = GPTP_FOLLOW_UP(pkt); + + /* + * Compute correction field according to + * IEEE802.1AS 11.2.14.2.3. + * + * The correction_field already contains the timestamp + * of the sync message. + * + * TODO: if the value to be stored in correction_field + * is too big to be represented, the field should + * be set to all 1's except the most significant bit. 
+ */ + hdr->correction_field -= sync_send->upstream_tx_time; + hdr->correction_field *= sync_send->rate_ratio; + hdr->correction_field += sync_send->follow_up_correction_field; + hdr->correction_field <<= 16; + + memcpy(&hdr->port_id, &sync_send->src_port_id, + sizeof(struct gptp_port_identity)); + hdr->log_msg_interval = sync_send->log_msg_interval; + + fup->prec_orig_ts_secs_high = + htons(sync_send->precise_orig_ts._sec.high); + fup->prec_orig_ts_secs_low = + htonl(sync_send->precise_orig_ts._sec.low); + fup->prec_orig_ts_nsecs = + htonl(sync_send->precise_orig_ts.nanosecond); + + fup->tlv.type = htons(GPTP_TLV_ORGANIZATION_EXT); + fup->tlv.len = htons(sizeof(struct gptp_follow_up_tlv)); + fup->tlv.org_id[0] = GPTP_FUP_TLV_ORG_ID_BYTE_0; + fup->tlv.org_id[1] = GPTP_FUP_TLV_ORG_ID_BYTE_1; + fup->tlv.org_id[2] = GPTP_FUP_TLV_ORG_ID_BYTE_2; + fup->tlv.org_sub_type[0] = 0; + fup->tlv.org_sub_type[1] = 0; + fup->tlv.org_sub_type[2] = GPTP_FUP_TLV_ORG_SUB_TYPE; + + fup->tlv.cumulative_scaled_rate_offset = + (sync_send->rate_ratio - 1.0) * GPTP_POW2(41); + fup->tlv.cumulative_scaled_rate_offset = + ntohl(fup->tlv.cumulative_scaled_rate_offset); + fup->tlv.gm_time_base_indicator = + ntohs(sync_send->gm_time_base_indicator); + fup->tlv.last_gm_phase_change.high = + ntohl(sync_send->last_gm_phase_change.high); + fup->tlv.last_gm_phase_change.low = + ntohll(sync_send->last_gm_phase_change.low); + fup->tlv.scaled_last_gm_freq_change = sync_send->last_gm_freq_change; + fup->tlv.scaled_last_gm_freq_change = + ntohl(fup->tlv.scaled_last_gm_freq_change); +} + +static int gptp_set_md_sync_receive(int port, + struct gptp_md_sync_info *sync_rcv) +{ + struct gptp_sync_rcv_state *state; + struct gptp_port_ds *port_ds; + struct gptp_hdr *sync_hdr, *fup_hdr; + struct gptp_follow_up *fup; + struct net_ptp_time *sync_ts; + double prop_delay_rated; + double delay_asymmetry_rated; + + state = &GPTP_PORT_STATE(port)->sync_rcv; + + if (!state->rcvd_sync_ptr || !state->rcvd_follow_up_ptr) { 
+ return -1; + } + + port_ds = GPTP_PORT_DS(port); + + sync_hdr = GPTP_HDR(state->rcvd_sync_ptr); + fup_hdr = GPTP_HDR(state->rcvd_follow_up_ptr); + fup = GPTP_FOLLOW_UP(state->rcvd_follow_up_ptr); + sync_ts = &state->rcvd_sync_ptr->timestamp; + + sync_rcv->follow_up_correction_field = + ntohll(fup_hdr->correction_field); + memcpy(&sync_rcv->src_port_id, &sync_hdr->port_id, + sizeof(struct gptp_port_identity)); + sync_rcv->log_msg_interval = fup_hdr->log_msg_interval; + sync_rcv->precise_orig_ts._sec.high = + ntohs(fup->prec_orig_ts_secs_high); + sync_rcv->precise_orig_ts._sec.low = ntohl(fup->prec_orig_ts_secs_low); + sync_rcv->precise_orig_ts.nanosecond = ntohl(fup->prec_orig_ts_nsecs); + + /* Compute time when sync was sent by the remote. */ + sync_rcv->upstream_tx_time = sync_ts->second; + sync_rcv->upstream_tx_time *= NSEC_PER_SEC; + sync_rcv->upstream_tx_time += sync_ts->nanosecond; + + prop_delay_rated = port_ds->neighbor_prop_delay; + prop_delay_rated /= port_ds->neighbor_rate_ratio; + + sync_rcv->upstream_tx_time -= prop_delay_rated; + + delay_asymmetry_rated = port_ds->delay_asymmetry; + delay_asymmetry_rated /= port_ds->neighbor_rate_ratio; + + sync_rcv->upstream_tx_time -= delay_asymmetry_rated; + + sync_rcv->rate_ratio = ntohl(fup->tlv.cumulative_scaled_rate_offset); + sync_rcv->rate_ratio *= GPTP_POW2(-41); + sync_rcv->rate_ratio += 1; + + sync_rcv->gm_time_base_indicator = + ntohs(fup->tlv.gm_time_base_indicator); + sync_rcv->last_gm_phase_change.high = + ntohl(fup->tlv.last_gm_phase_change.high); + sync_rcv->last_gm_phase_change.low = + ntohll(fup->tlv.last_gm_phase_change.low); + sync_rcv->last_gm_freq_change = + ntohl(fup->tlv.scaled_last_gm_freq_change); + + return 0; +} + +static void gptp_md_pdelay_reset(int port) +{ + struct gptp_pdelay_req_state *state; + struct gptp_port_ds *port_ds; + + NET_WARN("Reset Pdelay requests"); + + state = &GPTP_PORT_STATE(port)->pdelay_req; + port_ds = GPTP_PORT_DS(port); + + if (state->lost_responses < 
port_ds->allowed_lost_responses) { + state->lost_responses += 1; + } else { + port_ds->is_measuring_delay = false; + port_ds->as_capable = false; + state->init_pdelay_compute = true; + } +} + +static void gptp_md_pdelay_check_multiple_resp(int port) +{ + struct gptp_pdelay_req_state *state; + struct gptp_port_ds *port_ds; + int duration; + + state = &GPTP_PORT_STATE(port)->pdelay_req; + port_ds = GPTP_PORT_DS(port); + + if ((state->rcvd_pdelay_resp > 1) || + (state->rcvd_pdelay_follow_up > 1)) { + port_ds->as_capable = false; + NET_WARN("Too many responses (%d / %d)", + state->rcvd_pdelay_resp, + state->rcvd_pdelay_follow_up); + state->multiple_resp_count++; + } else { + state->multiple_resp_count = 0; + } + + if (state->multiple_resp_count >= 3) { + state->multiple_resp_count = 0; + k_timer_stop(&state->pdelay_timer); + state->pdelay_timer_expired = false; + + /* Substract time spent since last pDelay request. */ + duration = GPTP_MULTIPLE_PDELAY_RESP_WAIT - + gptp_uscaled_ns_to_timer_ms( + &port_ds->pdelay_req_itv); + k_timer_start(&state->pdelay_timer, duration, 0); + } else { + state->state = GPTP_PDELAY_REQ_SEND_REQ; + } +} + +static void gptp_md_compute_pdelay_rate_ratio(int port) +{ + struct gptp_pdelay_req_state *state; + struct gptp_port_ds *port_ds; + struct net_pkt *pkt; + struct gptp_hdr *hdr; + struct gptp_pdelay_resp_follow_up *fup; + + u64_t ingress_tstamp = 0; + u64_t resp_evt_tstamp = 0; + + double neighbor_rate_ratio; + + state = &GPTP_PORT_STATE(port)->pdelay_req; + port_ds = GPTP_PORT_DS(port); + + /* Get ingress timestamp. */ + pkt = state->rcvd_pdelay_resp_ptr; + if (pkt) { + ingress_tstamp = + gptp_timestamp_to_nsec(net_pkt_timestamp(pkt)); + } + + /* Get peer corrected timestamp. 
*/ + pkt = state->rcvd_pdelay_follow_up_ptr; + if (pkt) { + hdr = GPTP_HDR(pkt); + fup = GPTP_PDELAY_RESP_FOLLOWUP(pkt); + + resp_evt_tstamp = ntohs(fup->resp_orig_ts_secs_high); + resp_evt_tstamp <<= 32; + resp_evt_tstamp |= ntohl(fup->resp_orig_ts_secs_low); + resp_evt_tstamp *= NSEC_PER_SEC; + resp_evt_tstamp += ntohl(fup->resp_orig_ts_nsecs); + resp_evt_tstamp += (ntohll(hdr->correction_field) >> 16); + } + + if (state->init_pdelay_compute) { + state->init_pdelay_compute = false; + + state->ini_resp_ingress_tstamp = ingress_tstamp; + state->ini_resp_evt_tstamp = resp_evt_tstamp; + + neighbor_rate_ratio = 1.0; + + state->neighbor_rate_ratio_valid = false; + } else { + neighbor_rate_ratio = + (resp_evt_tstamp - state->ini_resp_evt_tstamp); + neighbor_rate_ratio /= + (ingress_tstamp - state->ini_resp_ingress_tstamp); + + /* Measure the ratio with the previously sent response. */ + state->ini_resp_ingress_tstamp = ingress_tstamp; + state->ini_resp_evt_tstamp = resp_evt_tstamp; + state->neighbor_rate_ratio_valid = true; + } + + port_ds->neighbor_rate_ratio = neighbor_rate_ratio; +} + +static void gptp_md_compute_prop_time(int port) +{ + struct gptp_pdelay_req_state *state; + struct gptp_port_ds *port_ds; + struct net_pkt *pkt; + struct gptp_hdr *hdr; + struct gptp_pdelay_resp *resp; + struct gptp_pdelay_resp_follow_up *fup; + u64_t t1_ns = 0, t2_ns = 0, t3_ns = 0, t4_ns = 0; + double prop_time; + + state = &GPTP_PORT_STATE(port)->pdelay_req; + port_ds = GPTP_PORT_DS(port); + + /* Get egress timestamp. */ + pkt = state->tx_pdelay_req_ptr; + if (pkt) { + t1_ns = gptp_timestamp_to_nsec(net_pkt_timestamp(pkt)); + } + + /* Get ingress timestamp. */ + pkt = state->rcvd_pdelay_resp_ptr; + if (pkt) { + t4_ns = gptp_timestamp_to_nsec(net_pkt_timestamp(pkt)); + } + + /* Get peer corrected timestamps. 
*/ + pkt = state->rcvd_pdelay_resp_ptr; + if (pkt) { + hdr = GPTP_HDR(pkt); + resp = GPTP_PDELAY_RESP(pkt); + + t2_ns = ((u64_t)ntohs(resp->req_receipt_ts_secs_high)) << 32; + t2_ns |= ntohl(resp->req_receipt_ts_secs_low); + t2_ns *= NSEC_PER_SEC; + t2_ns += ntohl(resp->req_receipt_ts_nsecs); + t2_ns += (ntohll(hdr->correction_field) >> 16); + } + + pkt = state->rcvd_pdelay_follow_up_ptr; + if (pkt) { + hdr = GPTP_HDR(pkt); + fup = GPTP_PDELAY_RESP_FOLLOWUP(pkt); + + t3_ns = ((u64_t)ntohs(fup->resp_orig_ts_secs_high)) << 32; + t3_ns |= ntohl(fup->resp_orig_ts_secs_low); + t3_ns *= NSEC_PER_SEC; + t3_ns += ntohl(fup->resp_orig_ts_nsecs); + t3_ns += (ntohll(hdr->correction_field) >> 16); + } + + prop_time = (t4_ns - t1_ns); + prop_time *= port_ds->neighbor_rate_ratio; + prop_time -= (t3_ns - t2_ns); + prop_time /= 2; + + port_ds->neighbor_prop_delay = prop_time; +} + +static void gptp_md_pdelay_compute(int port) +{ + struct gptp_pdelay_req_state *state; + struct gptp_port_ds *port_ds; + struct gptp_hdr *hdr; + struct net_pkt *pkt; + bool local_clock; + + state = &GPTP_PORT_STATE(port)->pdelay_req; + port_ds = GPTP_PORT_DS(port); + + if (!state->tx_pdelay_req_ptr || !state->rcvd_pdelay_resp_ptr || + !state->rcvd_pdelay_follow_up_ptr) { + NET_ERR("Compute path delay called without buffer ready"); + port_ds->as_capable = false; + goto out; + } + + if (port_ds->compute_neighbor_rate_ratio) { + gptp_md_compute_pdelay_rate_ratio(port); + } + + if (port_ds->compute_neighbor_prop_delay) { + gptp_md_compute_prop_time(port); + } + + state->lost_responses = 0; + port_ds->is_measuring_delay = true; + + pkt = state->rcvd_pdelay_follow_up_ptr; + hdr = GPTP_HDR(pkt); + + local_clock = !memcmp(gptp_domain.default_ds.clk_id, + hdr->port_id.clk_id, + GPTP_CLOCK_ID_LEN); + + if (local_clock) { + NET_WARN("Discard path delay response from local clock."); + goto out; + } + + if (!state->neighbor_rate_ratio_valid) { + goto out; + } + + /* + * Currently, if the computed delay is negative, 
this means + * that it is negligeable enough compared to other factors. + */ + if ((port_ds->neighbor_prop_delay <= + port_ds->neighbor_prop_delay_thresh)) { + port_ds->as_capable = true; + } else { + port_ds->as_capable = false; + + NET_WARN("Not AS capable: %u ns > %u ns", + (u32_t)port_ds->neighbor_prop_delay, + (u32_t)port_ds->neighbor_prop_delay_thresh); + + GPTP_STATS_INC(port, neighbor_prop_delay_exceeded); + } + +out: + /* Release buffers. */ + if (state->tx_pdelay_req_ptr) { + net_pkt_unref(state->tx_pdelay_req_ptr); + state->tx_pdelay_req_ptr = NULL; + } + + if (state->rcvd_pdelay_resp_ptr) { + net_pkt_unref(state->rcvd_pdelay_resp_ptr); + state->rcvd_pdelay_resp_ptr = NULL; + } + + if (state->rcvd_pdelay_follow_up_ptr) { + net_pkt_unref(state->rcvd_pdelay_follow_up_ptr); + state->rcvd_pdelay_follow_up_ptr = NULL; + } +} + +static void gptp_md_pdelay_req_timeout(struct k_timer *timer) +{ + struct gptp_pdelay_req_state *state; + int port; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + state = &GPTP_PORT_STATE(port)->pdelay_req; + if (timer == &state->pdelay_timer) { + state->pdelay_timer_expired = true; + + GPTP_STATS_INC(port, + pdelay_allowed_lost_resp_exceed_count); + } + } +} + +static void gptp_md_start_pdelay_req(int port) +{ + struct gptp_pdelay_req_state *state; + struct gptp_port_ds *port_ds; + + port_ds = GPTP_PORT_DS(port); + state = &GPTP_PORT_STATE(port)->pdelay_req; + + port_ds->neighbor_rate_ratio = 1.0; + port_ds->is_measuring_delay = false; + port_ds->as_capable = false; + state->lost_responses = 0; + state->rcvd_pdelay_resp = 0; + state->rcvd_pdelay_follow_up = 0; + state->multiple_resp_count = 0; +} + +static void gptp_md_follow_up_receipt_timeout(struct k_timer *timer) +{ + struct gptp_sync_rcv_state *state; + int port; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + state = &GPTP_PORT_STATE(port)->sync_rcv; + if (timer == &state->follow_up_discard_timer) { + NET_WARN("No follow up received after 
sync message"); + state->follow_up_timeout_expired = true; + } + } +} + +static void gptp_md_init_pdelay_req_state_machine(int port) +{ + struct gptp_pdelay_req_state *state; + + state = &GPTP_PORT_STATE(port)->pdelay_req; + + k_timer_init(&state->pdelay_timer, + gptp_md_pdelay_req_timeout, NULL); + + state->state = GPTP_PDELAY_REQ_NOT_ENABLED; + + state->neighbor_rate_ratio_valid = false; + state->init_pdelay_compute = true; + state->rcvd_pdelay_resp = 0; + state->rcvd_pdelay_follow_up = 0; + state->pdelay_timer_expired = false; + + state->rcvd_pdelay_resp_ptr = NULL; + state->rcvd_pdelay_follow_up_ptr = NULL; + state->tx_pdelay_req_ptr = NULL; + + state->ini_resp_evt_tstamp = 0; + state->ini_resp_ingress_tstamp = 0; + state->lost_responses = 0; +} + +static void gptp_md_init_pdelay_resp_state_machine(int port) +{ + struct gptp_pdelay_resp_state *state; + + state = &GPTP_PORT_STATE(port)->pdelay_resp; + + state->state = GPTP_PDELAY_RESP_NOT_ENABLED; +} + +static void gptp_md_init_sync_rcv_state_machine(int port) +{ + struct gptp_sync_rcv_state *state; + + state = &GPTP_PORT_STATE(port)->sync_rcv; + + k_timer_init(&state->follow_up_discard_timer, + gptp_md_follow_up_receipt_timeout, NULL); + + state->rcvd_sync = false; + state->rcvd_follow_up = false; + state->follow_up_timeout_expired = false; + + state->follow_up_receipt_timeout = 0; + + state->rcvd_sync_ptr = NULL; + state->rcvd_follow_up_ptr = NULL; + + state->state = GPTP_SYNC_RCV_DISCARD; +} + +static void gptp_md_init_sync_send_state_machine(int port) +{ + struct gptp_sync_send_state *state; + + state = &GPTP_PORT_STATE(port)->sync_send; + + state->rcvd_md_sync = false; + state->md_sync_timestamp_avail = false; + state->sync_send_ptr = NULL; + state->sync_ptr = NULL; + + state->state = GPTP_SYNC_SEND_INITIALIZING; +} + +void gptp_md_init_state_machine(void) +{ + int port; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + gptp_md_init_pdelay_req_state_machine(port); + 
gptp_md_init_pdelay_resp_state_machine(port); + gptp_md_init_sync_rcv_state_machine(port); + gptp_md_init_sync_send_state_machine(port); + } +} + +static void gptp_md_pdelay_req_state_machine(int port) +{ + struct gptp_port_ds *port_ds; + struct gptp_pdelay_req_state *state; + struct net_pkt *pkt; + + state = &GPTP_PORT_STATE(port)->pdelay_req; + port_ds = GPTP_PORT_DS(port); + + /* Unset AS-Capable if multiple responses to a pDelay request have been + * reveived. + */ + if (state->rcvd_pdelay_resp > 1 || state->rcvd_pdelay_follow_up > 1) { + port_ds->as_capable = false; + } + + if (!port_ds->ptt_port_enabled) { + /* Make sure the timer is stopped. */ + k_timer_stop(&state->pdelay_timer); + state->state = GPTP_PDELAY_REQ_NOT_ENABLED; + } + + switch (state->state) { + case GPTP_PDELAY_REQ_NOT_ENABLED: + if (port_ds->ptt_port_enabled) { + /* (Re)Init interval (as defined in + * LinkDelaySyncIntervalSetting state machine). + */ + port_ds->cur_log_pdelay_req_itv = + port_ds->ini_log_pdelay_req_itv; + + gptp_set_time_itv(&port_ds->pdelay_req_itv, 1, + port_ds->cur_log_pdelay_req_itv); + + port_ds->compute_neighbor_rate_ratio = true; + port_ds->compute_neighbor_prop_delay = true; + + state->pdelay_timer_expired = true; + state->state = GPTP_PDELAY_REQ_INITIAL_SEND_REQ; + } + break; + + case GPTP_PDELAY_REQ_RESET: + gptp_md_pdelay_reset(port); + /* Send a request on the next timer expiry. 
*/ + state->state = GPTP_PDELAY_REQ_WAIT_ITV_TIMER; + break; + + case GPTP_PDELAY_REQ_INITIAL_SEND_REQ: + gptp_md_start_pdelay_req(port); + + case GPTP_PDELAY_REQ_SEND_REQ: + if (state->tx_pdelay_req_ptr) { + net_pkt_unref(state->tx_pdelay_req_ptr); + state->tx_pdelay_req_ptr = NULL; + } + + if (state->rcvd_pdelay_resp_ptr) { + net_pkt_unref(state->rcvd_pdelay_resp_ptr); + state->rcvd_pdelay_resp_ptr = NULL; + } + + if (state->rcvd_pdelay_follow_up_ptr) { + net_pkt_unref(state->rcvd_pdelay_follow_up_ptr); + state->rcvd_pdelay_follow_up_ptr = NULL; + } + + gptp_send_pdelay_req(port); + + k_timer_stop(&state->pdelay_timer); + state->pdelay_timer_expired = false; + k_timer_start(&state->pdelay_timer, + gptp_uscaled_ns_to_timer_ms( + &port_ds->pdelay_req_itv), + 0); + /* + * Transition directly to GPTP_PDELAY_REQ_WAIT_RESP. + * Check for the TX timestamp will be done during + * the computation of the path delay. + */ + state->state = GPTP_PDELAY_REQ_WAIT_RESP; + break; + + case GPTP_PDELAY_REQ_WAIT_RESP: + if (state->pdelay_timer_expired) { + state->state = GPTP_PDELAY_REQ_RESET; + } else if (state->rcvd_pdelay_resp != 0) { + pkt = state->rcvd_pdelay_resp_ptr; + if (!gptp_handle_pdelay_resp(port, pkt)) { + state->state = GPTP_PDELAY_REQ_WAIT_FOLLOW_UP; + } else { + state->state = GPTP_PDELAY_REQ_RESET; + } + } + break; + + case GPTP_PDELAY_REQ_WAIT_FOLLOW_UP: + if (state->pdelay_timer_expired) { + state->state = GPTP_PDELAY_REQ_RESET; + } else if (state->rcvd_pdelay_follow_up != 0) { + pkt = state->rcvd_pdelay_follow_up_ptr; + if (!gptp_handle_pdelay_follow_up(port, pkt)) { + gptp_md_pdelay_compute(port); + state->state = GPTP_PDELAY_REQ_WAIT_ITV_TIMER; + } else { + state->state = GPTP_PDELAY_REQ_RESET; + } + } + break; + + case GPTP_PDELAY_REQ_WAIT_ITV_TIMER: + if (state->pdelay_timer_expired) { + gptp_md_pdelay_check_multiple_resp(port); + + state->rcvd_pdelay_resp = 0; + state->rcvd_pdelay_follow_up = 0; + } + break; + } +} + +static void 
gptp_md_pdelay_resp_state_machine(int port) +{ + struct gptp_port_ds *port_ds; + struct gptp_pdelay_resp_state *state; + + state = &GPTP_PORT_STATE(port)->pdelay_resp; + port_ds = GPTP_PORT_DS(port); + + if (!port_ds->ptt_port_enabled) { + state->state = GPTP_PDELAY_RESP_NOT_ENABLED; + } + + switch (state->state) { + case GPTP_PDELAY_RESP_NOT_ENABLED: + if (port_ds->ptt_port_enabled) { + state->state = GPTP_PDELAY_RESP_INITIAL_WAIT_REQ; + } + break; + + case GPTP_PDELAY_RESP_INITIAL_WAIT_REQ: + case GPTP_PDELAY_RESP_WAIT_REQ: + /* Handled in gptp_handle_msg for latency considerations. */ + break; + + case GPTP_PDELAY_RESP_WAIT_TSTAMP: + /* Handled in gptp_follow_up_callback. */ + break; + } + +} + +static void gptp_md_sync_receive_state_machine(int port) +{ + struct gptp_port_ds *port_ds; + struct gptp_sync_rcv_state *state; + struct gptp_pss_rcv_state *pss_state; + + state = &GPTP_PORT_STATE(port)->sync_rcv; + pss_state = &GPTP_PORT_STATE(port)->pss_rcv; + port_ds = GPTP_PORT_DS(port); + + if ((!port_ds->ptt_port_enabled) || !port_ds->as_capable) { + /* Make sure the timer is stopped. */ + k_timer_stop(&state->follow_up_discard_timer); + + /* Discard all received messages. */ + if (state->rcvd_sync_ptr) { + net_pkt_unref(state->rcvd_sync_ptr); + state->rcvd_sync_ptr = NULL; + } + + if (state->rcvd_follow_up_ptr) { + net_pkt_unref(state->rcvd_follow_up_ptr); + state->rcvd_follow_up_ptr = NULL; + } + + state->rcvd_sync = false; + state->rcvd_follow_up = false; + state->state = GPTP_SYNC_RCV_DISCARD; + return; + } + + switch (state->state) { + case GPTP_SYNC_RCV_DISCARD: + case GPTP_SYNC_RCV_WAIT_SYNC: + if (state->rcvd_sync) { + gptp_handle_sync(port, state->rcvd_sync_ptr); + state->rcvd_sync = false; + state->state = GPTP_SYNC_RCV_WAIT_FOLLOW_UP; + } else if (state->rcvd_follow_up) { + /* Delete late/early message. 
*/ + if (state->rcvd_follow_up_ptr) { + net_pkt_unref(state->rcvd_follow_up_ptr); + state->rcvd_follow_up_ptr = NULL; + } + + state->rcvd_follow_up = false; + } + break; + + case GPTP_SYNC_RCV_WAIT_FOLLOW_UP: + /* Never received a follow up for a sync message. */ + if (state->follow_up_timeout_expired) { + k_timer_stop(&state->follow_up_discard_timer); + state->follow_up_timeout_expired = false; + state->state = GPTP_SYNC_RCV_DISCARD; + if (state->rcvd_sync_ptr) { + net_pkt_unref(state->rcvd_sync_ptr); + state->rcvd_sync_ptr = NULL; + } + + state->rcvd_sync = false; + } else if (state->rcvd_sync) { + /* Handle received extra sync. */ + gptp_handle_sync(port, state->rcvd_sync_ptr); + state->rcvd_sync = false; + } else if (state->rcvd_follow_up) { + if (!gptp_handle_follow_up(port, + state->rcvd_follow_up_ptr)) { + /* + * Fill the structure to be sent to + * PortSyncSyncReceive. + */ + gptp_set_md_sync_receive(port, + &pss_state->sync_rcv); + + pss_state->rcvd_md_sync = true; + + state->state = GPTP_SYNC_RCV_WAIT_SYNC; + + /* Buffers can be released now. */ + if (state->rcvd_sync_ptr) { + net_pkt_unref(state->rcvd_sync_ptr); + state->rcvd_sync_ptr = NULL; + } + + k_timer_stop(&state->follow_up_discard_timer); + state->follow_up_timeout_expired = false; + } + } + + if (state->rcvd_follow_up_ptr) { + net_pkt_unref(state->rcvd_follow_up_ptr); + state->rcvd_follow_up_ptr = NULL; + } + + state->rcvd_follow_up = false; + break; + } +} + +static void gptp_md_sync_send_state_machine(int port) +{ + struct gptp_port_ds *port_ds; + struct gptp_sync_send_state *state; + struct net_pkt *pkt; + + state = &GPTP_PORT_STATE(port)->sync_send; + port_ds = GPTP_PORT_DS(port); + + if ((!port_ds->ptt_port_enabled) || !port_ds->as_capable) { + state->rcvd_md_sync = false; + state->state = GPTP_SYNC_SEND_INITIALIZING; + + /* + * Sync sequence id is initialized in the port_ds + * init function. 
+ */ + return; + } + + switch (state->state) { + case GPTP_SYNC_SEND_INITIALIZING: + state->state = GPTP_SYNC_SEND_SEND_SYNC; + break; + + case GPTP_SYNC_SEND_SEND_SYNC: + if (state->rcvd_md_sync) { + pkt = gptp_prepare_sync(port); + if (pkt) { + /* Reference message to track timestamp info */ + state->sync_ptr = net_pkt_ref(pkt); + gptp_md_sync_prepare(pkt, + state->sync_send_ptr); + gptp_send_sync(port, pkt); + } + + state->rcvd_md_sync = false; + state->state = GPTP_SYNC_SEND_SEND_FUP; + } + break; + + case GPTP_SYNC_SEND_SEND_FUP: + if (state->md_sync_timestamp_avail) { + state->md_sync_timestamp_avail = false; + + if (!state->sync_ptr) { + NET_ERR("Sync message not available"); + break; + } + + pkt = gptp_prepare_follow_up(port, state->sync_ptr); + if (pkt) { + gptp_md_follow_up_prepare(pkt, + state->sync_send_ptr); + gptp_send_follow_up(port, pkt); + } + + net_pkt_unref(state->sync_ptr); + state->sync_ptr = NULL; + + state->state = GPTP_SYNC_SEND_SEND_SYNC; + } + break; + } +} + +void gptp_md_state_machines(int port) +{ + gptp_md_pdelay_req_state_machine(port); + gptp_md_pdelay_resp_state_machine(port); + gptp_md_sync_receive_state_machine(port); + gptp_md_sync_send_state_machine(port); +} diff --git a/subsys/net/ip/l2/gptp/gptp_messages.c b/subsys/net/ip/l2/gptp/gptp_messages.c new file mode 100644 index 0000000000000..f24598e57a1fd --- /dev/null +++ b/subsys/net/ip/l2/gptp/gptp_messages.c @@ -0,0 +1,1167 @@ +/* + * Copyright (c) 2017 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_GPTP) +#define SYS_LOG_DOMAIN "net/gptp" +#define NET_LOG_ENABLED 1 +#endif + +#include + +#include +#include +#include + +#include "gptp_private.h" + +#define NET_BUF_TIMEOUT MSEC(100) + +static struct net_if_timestamp_cb sync_timestamp_cb; +static struct net_if_timestamp_cb pdelay_response_timestamp_cb; +static bool ts_cb_registered; +static bool sync_cb_registered; + +static const struct net_eth_addr gptp_multicast_eth_addr = { + { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e } }; + +#define PRINT_INFO(msg, pkt) \ + if (IS_ENABLED(NET_LOG_ENABLED)) { \ + struct gptp_hdr *hdr = GPTP_HDR(pkt); \ + \ + ARG_UNUSED(hdr); \ + \ + NET_DBG("Sending %s seq %d pkt %p", msg, \ + hdr->sequence_id, pkt); \ + } + +static void gptp_sync_timestamp_callback(struct net_pkt *pkt) +{ + int port = 0; + struct gptp_sync_send_state *state; + struct gptp_hdr *hdr; + + port = gptp_get_port_number(net_pkt_iface(pkt)); + if (port == -ENODEV) { + NET_DBG("No port found for ptp buffer"); + return; + } + + state = &GPTP_PORT_STATE(port)->sync_send; + + hdr = GPTP_HDR(pkt); + + /* If this buffer is a sync, flag it to the state machine. */ + if (hdr->message_type == GPTP_SYNC_MESSAGE) { + state->md_sync_timestamp_avail = true; + + net_if_unregister_timestamp_cb(&sync_timestamp_cb); + sync_cb_registered = false; + + /* The pkt was ref'ed in gptp_send_sync() */ + net_pkt_unref(pkt); + } +} + +static void gptp_pdelay_response_timestamp_callback(struct net_pkt *pkt) +{ + int port = 0; + struct net_pkt *follow_up; + struct gptp_hdr *hdr; + + port = gptp_get_port_number(net_pkt_iface(pkt)); + if (port == -ENODEV) { + NET_DBG("No port found for ptp buffer"); + goto out; + } + + hdr = GPTP_HDR(pkt); + + /* If this buffer is a path delay response, send the follow up. 
*/ + if (hdr->message_type == GPTP_PATH_DELAY_RESP_MESSAGE) { + follow_up = gptp_prepare_pdelay_follow_up(port, pkt); + if (!follow_up) { + /* Cannot handle the follow up, abort */ + NET_ERR("Could not get buffer"); + goto out; + } + + net_if_unregister_timestamp_cb(&pdelay_response_timestamp_cb); + ts_cb_registered = false; + + gptp_send_pdelay_follow_up(port, follow_up, + net_pkt_timestamp(pkt)); + +out: + /* The pkt was ref'ed in gptp_handle_pdelay_req() */ + net_pkt_unref(pkt); + } +} + +struct net_pkt *gptp_prepare_sync(int port) +{ + int eth_len = sizeof(struct net_eth_hdr); + struct gptp_port_ds *port_ds; + struct net_eth_hdr *eth; + struct gptp_sync *sync; + struct net_if *iface; + struct net_pkt *pkt; + struct net_buf *frag; + struct gptp_hdr *hdr; + +#if defined(CONFIG_NET_VLAN) + struct net_eth_vlan_hdr *hdr_vlan; + struct ethernet_context *eth_ctx; + bool vlan_enabled = false; +#endif + + NET_ASSERT((port >= GPTP_PORT_START) && (port <= GPTP_PORT_END)); + iface = GPTP_PORT_IFACE(port); + NET_ASSERT(iface); + +#if defined(CONFIG_NET_VLAN) + eth_ctx = net_if_l2_data(iface); + if (eth_ctx->vlan_enabled && + net_eth_get_vlan_tag(iface) != NET_VLAN_TAG_UNSPEC) { + eth_len = sizeof(struct net_eth_vlan_hdr); + vlan_enabled = true; + } +#endif + + pkt = net_pkt_get_reserve_tx(0, NET_BUF_TIMEOUT); + if (!pkt) { + goto fail; + } + + frag = net_pkt_get_reserve_tx_data(eth_len, NET_BUF_TIMEOUT); + if (!frag) { + goto fail; + } + + net_pkt_frag_add(pkt, frag); + net_pkt_set_iface(pkt, iface); + net_pkt_set_family(pkt, AF_UNSPEC); + net_pkt_set_priority(pkt, NET_PRIORITY_CA); + + net_pkt_set_ll_reserve(pkt, eth_len); + +#if defined(CONFIG_NET_VLAN) + if (vlan_enabled) { + hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + } +#endif + + port_ds = GPTP_PORT_DS(port); + sync = GPTP_SYNC(pkt); + hdr = GPTP_HDR(pkt); + eth = NET_ETH_HDR(pkt); + + /* + * Header configuration. + * + * Some fields are set by gptp_md_sync_send_prepare(). 
+ */
+ hdr->transport_specific = GPTP_TRANSPORT_802_1_AS;
+ hdr->message_type = GPTP_SYNC_MESSAGE;
+ hdr->ptp_version = GPTP_VERSION;
+ hdr->sequence_id = htons(port_ds->sync_seq_id);
+ hdr->domain_number = 0;
+ hdr->correction_field = 0;
+ hdr->flags.octets[0] = GPTP_FLAG_TWO_STEP;
+ hdr->flags.octets[1] = GPTP_FLAG_PTP_TIMESCALE;
+ hdr->message_length = htons(sizeof(struct gptp_hdr) +
+ sizeof(struct gptp_sync));
+ hdr->control = GPTP_SYNC_CONTROL_VALUE;
+
+ /* Clear reserved fields. */
+ hdr->reserved0 = 0;
+ hdr->reserved1 = 0;
+ hdr->reserved2 = 0;
+
+ /* Ethernet configuration. */
+#if defined(CONFIG_NET_VLAN)
+ if (vlan_enabled) {
+ hdr_vlan->vlan.tpid = htons(NET_ETH_PTYPE_VLAN);
+ hdr_vlan->vlan.tci = htons(net_eth_get_vlan_tag(iface));
+ hdr_vlan->type = htons(NET_ETH_PTYPE_PTP);
+ } else
+#endif
+ {
+ eth->type = htons(NET_ETH_PTYPE_PTP);
+ }
+
+ memcpy(&eth->src.addr, net_if_get_link_addr(iface)->addr,
+ sizeof(struct net_eth_addr));
+ memcpy(&eth->dst.addr, &gptp_multicast_eth_addr,
+ sizeof(struct net_eth_addr));
+
+ /* PTP configuration. */
+ memset(&sync->reserved, 0, sizeof(sync->reserved));
+
+ net_buf_add(frag, sizeof(struct gptp_hdr) + sizeof(struct gptp_sync));
+
+ /* Update sequence number. 
*/ + port_ds->sync_seq_id++; + + return pkt; + +fail: + if (pkt) { + net_pkt_unref(pkt); + } + + return NULL; +} + +struct net_pkt *gptp_prepare_follow_up(int port, struct net_pkt *sync) +{ + int eth_len = sizeof(struct net_eth_hdr); + struct gptp_hdr *hdr, *sync_hdr; + struct gptp_port_ds *port_ds; + struct net_eth_hdr *eth; + struct net_if *iface; + struct net_pkt *pkt; + struct net_buf *frag; + +#if defined(CONFIG_NET_VLAN) + struct net_eth_vlan_hdr *hdr_vlan; + struct ethernet_context *eth_ctx; + bool vlan_enabled = false; +#endif + + NET_ASSERT(sync); + NET_ASSERT((port >= GPTP_PORT_START) && (port <= GPTP_PORT_END)); + iface = GPTP_PORT_IFACE(port); + NET_ASSERT(iface); + +#if defined(CONFIG_NET_VLAN) + eth_ctx = net_if_l2_data(iface); + if (eth_ctx->vlan_enabled && + net_eth_get_vlan_tag(iface) != NET_VLAN_TAG_UNSPEC) { + eth_len = sizeof(struct net_eth_vlan_hdr); + vlan_enabled = true; + } +#endif + + pkt = net_pkt_get_reserve_tx(0, NET_BUF_TIMEOUT); + if (!pkt) { + goto fail; + } + + frag = net_pkt_get_reserve_tx_data(eth_len, NET_BUF_TIMEOUT); + if (!frag) { + goto fail; + } + + net_pkt_frag_add(pkt, frag); + net_pkt_set_iface(pkt, iface); + net_pkt_set_family(pkt, AF_UNSPEC); + net_pkt_set_ll_reserve(pkt, eth_len); + net_pkt_set_priority(pkt, NET_PRIORITY_IC); + +#if defined(CONFIG_NET_VLAN) + if (vlan_enabled) { + hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(sync)); + } +#endif + + port_ds = GPTP_PORT_DS(port); + hdr = GPTP_HDR(pkt); + sync_hdr = GPTP_HDR(sync); + eth = NET_ETH_HDR(pkt); + + /* + * Header configuration. + * + * Some fields are set by gptp_md_follow_up_prepare(). + */ + hdr->transport_specific = GPTP_TRANSPORT_802_1_AS; + hdr->message_type = GPTP_FOLLOWUP_MESSAGE; + hdr->ptp_version = GPTP_VERSION; + hdr->sequence_id = sync_hdr->sequence_id; + hdr->domain_number = 0; + /* Store timestamp value in correction field. 
 */
+ hdr->correction_field = gptp_timestamp_to_nsec(&sync->timestamp);
+ hdr->flags.octets[0] = 0;
+ hdr->flags.octets[1] = GPTP_FLAG_PTP_TIMESCALE;
+ hdr->message_length = htons(sizeof(struct gptp_hdr) +
+ sizeof(struct gptp_follow_up));
+ hdr->control = GPTP_FUP_CONTROL_VALUE;
+
+ /* Clear reserved fields. */
+ hdr->reserved0 = 0;
+ hdr->reserved1 = 0;
+ hdr->reserved2 = 0;
+
+ /* Ethernet configuration. */
+#if defined(CONFIG_NET_VLAN)
+ if (vlan_enabled) {
+ hdr_vlan->vlan.tpid = htons(NET_ETH_PTYPE_VLAN);
+ hdr_vlan->vlan.tci = htons(net_pkt_vlan_tag(pkt));
+ hdr_vlan->type = htons(NET_ETH_PTYPE_PTP);
+ } else
+#endif
+ {
+ eth->type = htons(NET_ETH_PTYPE_PTP);
+ }
+
+ memcpy(&eth->src.addr, net_if_get_link_addr(iface)->addr,
+ sizeof(struct net_eth_addr));
+ memcpy(&eth->dst.addr, &gptp_multicast_eth_addr,
+ sizeof(struct net_eth_addr));
+
+ /* PTP configuration will be set by the MDSyncSend state machine. */
+
+ net_buf_add(frag, sizeof(struct gptp_hdr) +
+ sizeof(struct gptp_follow_up));
+
+ return pkt;
+
+fail:
+ if (pkt) {
+ net_pkt_unref(pkt);
+ }
+
+ return NULL;
+}
+
+struct net_pkt *gptp_prepare_pdelay_req(int port)
+{
+ int eth_len = sizeof(struct net_eth_hdr);
+ struct gptp_pdelay_req *req;
+ struct gptp_port_ds *port_ds;
+ struct net_eth_hdr *eth;
+ struct net_if *iface;
+ struct net_pkt *pkt;
+ struct net_buf *frag;
+ struct gptp_hdr *hdr;
+
+#if defined(CONFIG_NET_VLAN)
+ struct net_eth_vlan_hdr *hdr_vlan;
+ struct ethernet_context *eth_ctx;
+ bool vlan_enabled = false;
+#endif
+
+ NET_ASSERT((port >= GPTP_PORT_START) && (port <= GPTP_PORT_END));
+ iface = GPTP_PORT_IFACE(port);
+ NET_ASSERT(iface);
+
+#if defined(CONFIG_NET_VLAN)
+ eth_ctx = net_if_l2_data(iface);
+ if (eth_ctx->vlan_enabled &&
+ net_eth_get_vlan_tag(iface) != NET_VLAN_TAG_UNSPEC) {
+ eth_len = sizeof(struct net_eth_vlan_hdr);
+ vlan_enabled = true;
+ }
+#endif
+
+ pkt = net_pkt_get_reserve_tx(0, NET_BUF_TIMEOUT);
+ if (!pkt) {
+ goto fail;
+ }
+
+ frag = 
net_pkt_get_reserve_tx_data(eth_len, NET_BUF_TIMEOUT);
+ if (!frag) {
+ goto fail;
+ }
+
+ net_pkt_frag_add(pkt, frag);
+ net_pkt_set_iface(pkt, iface);
+ net_pkt_set_family(pkt, AF_UNSPEC);
+ net_pkt_set_ll_reserve(pkt, eth_len);
+ net_pkt_set_priority(pkt, NET_PRIORITY_CA);
+
+#if defined(CONFIG_NET_VLAN)
+ if (vlan_enabled) {
+ hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
+ }
+#endif
+
+ port_ds = GPTP_PORT_DS(port);
+ req = GPTP_PDELAY_REQ(pkt);
+ hdr = GPTP_HDR(pkt);
+ eth = NET_ETH_HDR(pkt);
+
+ /* Header configuration. */
+ hdr->transport_specific = GPTP_TRANSPORT_802_1_AS;
+ hdr->message_type = GPTP_PATH_DELAY_REQ_MESSAGE;
+ hdr->ptp_version = GPTP_VERSION;
+ hdr->sequence_id = htons(port_ds->pdelay_req_seq_id);
+ hdr->domain_number = 0;
+ hdr->correction_field = 0;
+ hdr->flags.octets[0] = 0;
+ hdr->flags.octets[1] = GPTP_FLAG_PTP_TIMESCALE;
+
+ hdr->message_length = htons(sizeof(struct gptp_hdr) +
+ sizeof(struct gptp_pdelay_req));
+ hdr->port_id.port_number = htons(port_ds->port_id.port_number);
+ hdr->control = GPTP_OTHER_CONTROL_VALUE;
+ hdr->log_msg_interval = port_ds->cur_log_pdelay_req_itv;
+
+ /* Clear reserved fields. */
+ hdr->reserved0 = 0;
+ hdr->reserved1 = 0;
+ hdr->reserved2 = 0;
+
+ memcpy(&hdr->port_id.clk_id,
+ &port_ds->port_id.clk_id, GPTP_CLOCK_ID_LEN);
+
+ /* Ethernet configuration. */
+#if defined(CONFIG_NET_VLAN)
+ if (vlan_enabled) {
+ hdr_vlan->vlan.tpid = htons(NET_ETH_PTYPE_VLAN);
+ hdr_vlan->vlan.tci = htons(net_eth_get_vlan_tag(iface));
+ hdr_vlan->type = htons(NET_ETH_PTYPE_PTP);
+ } else
+#endif
+ {
+ eth->type = htons(NET_ETH_PTYPE_PTP);
+ }
+
+ memcpy(&eth->src.addr, net_if_get_link_addr(iface)->addr,
+ sizeof(struct net_eth_addr));
+ memcpy(&eth->dst.addr, &gptp_multicast_eth_addr,
+ sizeof(struct net_eth_addr));
+
+ /* PTP configuration. 
*/ + memset(&req->reserved1, 0, sizeof(req->reserved1)); + memset(&req->reserved2, 0, sizeof(req->reserved2)); + + net_buf_add(frag, sizeof(struct gptp_hdr) + + sizeof(struct gptp_pdelay_req)); + + /* Update sequence number. */ + port_ds->pdelay_req_seq_id++; + + return pkt; + +fail: + if (pkt) { + net_pkt_unref(pkt); + } + + return NULL; +} + +struct net_pkt *gptp_prepare_pdelay_resp(int port, + struct net_pkt *req) +{ + struct net_if *iface = net_pkt_iface(req); + int eth_len = sizeof(struct net_eth_hdr); + struct gptp_pdelay_resp *pdelay_resp; + struct net_eth_hdr *eth, *eth_query; + struct gptp_pdelay_req *pdelay_req; + struct gptp_hdr *hdr, *query; + struct gptp_port_ds *port_ds; + struct net_pkt *pkt; + struct net_buf *frag; + +#if defined(CONFIG_NET_VLAN) + struct net_eth_vlan_hdr *hdr_vlan; + struct ethernet_context *eth_ctx; + bool vlan_enabled = false; + + eth_ctx = net_if_l2_data(iface); + if (eth_ctx->vlan_enabled && + net_eth_get_vlan_tag(iface) != NET_VLAN_TAG_UNSPEC) { + eth_len = sizeof(struct net_eth_vlan_hdr); + vlan_enabled = true; + } +#endif + + pkt = net_pkt_get_reserve_tx(0, NET_BUF_TIMEOUT); + if (!pkt) { + goto fail; + } + + frag = net_pkt_get_reserve_tx_data(eth_len, NET_BUF_TIMEOUT); + if (!frag) { + goto fail; + } + + net_pkt_frag_add(pkt, frag); + net_pkt_set_iface(pkt, iface); + net_pkt_set_family(pkt, AF_INET); + net_pkt_set_ll_reserve(pkt, eth_len); + net_pkt_set_priority(pkt, NET_PRIORITY_CA); + +#if defined(CONFIG_NET_VLAN) + if (vlan_enabled) { + hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + } +#endif + + port_ds = GPTP_PORT_DS(port); + + pdelay_resp = GPTP_PDELAY_RESP(pkt); + hdr = GPTP_HDR(pkt); + eth = NET_ETH_HDR(pkt); + + pdelay_req = GPTP_PDELAY_REQ(req); + query = GPTP_HDR(req); + eth_query = NET_ETH_HDR(req); + + /* Header configuration. 
 */
+ hdr->transport_specific = GPTP_TRANSPORT_802_1_AS;
+ hdr->message_type = GPTP_PATH_DELAY_RESP_MESSAGE;
+ hdr->ptp_version = GPTP_VERSION;
+ hdr->sequence_id = query->sequence_id;
+ hdr->domain_number = query->domain_number;
+ hdr->correction_field = query->correction_field;
+ hdr->flags.octets[0] = GPTP_FLAG_TWO_STEP;
+ hdr->flags.octets[1] = GPTP_FLAG_PTP_TIMESCALE;
+
+ hdr->message_length = htons(sizeof(struct gptp_hdr) +
+ sizeof(struct gptp_pdelay_resp));
+ hdr->port_id.port_number = htons(port_ds->port_id.port_number);
+ hdr->control = GPTP_OTHER_CONTROL_VALUE;
+ hdr->log_msg_interval = GPTP_RESP_LOG_MSG_ITV;
+
+ /* Clear reserved fields. */
+ hdr->reserved0 = 0;
+ hdr->reserved1 = 0;
+ hdr->reserved2 = 0;
+
+ memcpy(&hdr->port_id.clk_id,
+ &port_ds->port_id.clk_id,
+ GPTP_CLOCK_ID_LEN);
+
+ /* Ethernet configuration. */
+#if defined(CONFIG_NET_VLAN)
+ if (vlan_enabled) {
+ hdr_vlan->vlan.tpid = htons(NET_ETH_PTYPE_VLAN);
+ hdr_vlan->vlan.tci = htons(net_pkt_vlan_tag(pkt));
+ hdr_vlan->type = htons(NET_ETH_PTYPE_PTP);
+ } else
+#endif
+ {
+ eth->type = htons(NET_ETH_PTYPE_PTP);
+ }
+
+ memcpy(&eth->dst.addr, &gptp_multicast_eth_addr,
+ sizeof(struct net_eth_addr));
+ memcpy(&eth->src.addr, net_if_get_link_addr(iface)->addr,
+ sizeof(struct net_eth_addr));
+
+ /* PTP configuration. 
*/ + pdelay_resp->req_receipt_ts_secs_high = 0; + pdelay_resp->req_receipt_ts_secs_low = 0; + pdelay_resp->req_receipt_ts_nsecs = 0; + memcpy(&pdelay_resp->requesting_port_id, + &query->port_id, sizeof(struct gptp_port_identity)); + + net_buf_add(frag, sizeof(struct gptp_hdr) + + sizeof(struct gptp_pdelay_resp)); + + return pkt; + +fail: + if (pkt) { + net_pkt_unref(pkt); + } + + return NULL; +} + +struct net_pkt *gptp_prepare_pdelay_follow_up(int port, + struct net_pkt *resp) +{ + struct net_if *iface = net_pkt_iface(resp); + int eth_len = sizeof(struct net_eth_hdr); + struct gptp_pdelay_resp_follow_up *follow_up; + struct net_eth_hdr *eth, *eth_query; + struct gptp_pdelay_resp *pdelay_resp; + struct gptp_hdr *hdr, *resp_hdr; + struct gptp_port_ds *port_ds; + struct net_pkt *pkt; + struct net_buf *frag; + +#if defined(CONFIG_NET_VLAN) + struct net_eth_vlan_hdr *hdr_vlan; + struct ethernet_context *eth_ctx; + bool vlan_enabled = false; + + eth_ctx = net_if_l2_data(iface); + if (eth_ctx->vlan_enabled && + net_eth_get_vlan_tag(iface) != NET_VLAN_TAG_UNSPEC) { + eth_len = sizeof(struct net_eth_vlan_hdr); + vlan_enabled = true; + } +#endif + + pkt = net_pkt_get_reserve_tx(0, NET_BUF_TIMEOUT); + if (!pkt) { + goto fail; + } + + frag = net_pkt_get_reserve_tx_data(eth_len, NET_BUF_TIMEOUT); + if (!frag) { + goto fail; + } + + net_pkt_frag_add(pkt, frag); + net_pkt_set_iface(pkt, iface); + net_pkt_set_family(pkt, AF_INET); + net_pkt_set_ll_reserve(pkt, eth_len); + net_pkt_set_priority(pkt, NET_PRIORITY_IC); + +#if defined(CONFIG_NET_VLAN) + if (vlan_enabled) { + hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(resp)); + } +#endif + + port_ds = GPTP_PORT_DS(port); + + follow_up = GPTP_PDELAY_RESP_FOLLOWUP(pkt); + hdr = GPTP_HDR(pkt); + eth = NET_ETH_HDR(pkt); + + pdelay_resp = GPTP_PDELAY_RESP(resp); + resp_hdr = GPTP_HDR(resp); + eth_query = NET_ETH_HDR(resp); + + /* Header configuration. 
 */
+ hdr->transport_specific = GPTP_TRANSPORT_802_1_AS;
+ hdr->ptp_version = GPTP_VERSION;
+ hdr->message_type = GPTP_PATH_DELAY_FOLLOWUP_MESSAGE;
+ hdr->sequence_id = resp_hdr->sequence_id;
+ hdr->domain_number = resp_hdr->domain_number;
+ hdr->correction_field = 0;
+ hdr->message_length = htons(sizeof(struct gptp_hdr) +
+ sizeof(struct gptp_pdelay_resp_follow_up));
+ hdr->port_id.port_number = htons(port_ds->port_id.port_number);
+ hdr->control = GPTP_OTHER_CONTROL_VALUE;
+ hdr->log_msg_interval = GPTP_RESP_LOG_MSG_ITV;
+
+ hdr->flags.octets[0] = 0;
+ hdr->flags.octets[1] = GPTP_FLAG_PTP_TIMESCALE;
+
+ /* Clear reserved fields. */
+ hdr->reserved0 = 0;
+ hdr->reserved1 = 0;
+ hdr->reserved2 = 0;
+
+ memcpy(&hdr->port_id.clk_id,
+ &port_ds->port_id.clk_id,
+ GPTP_CLOCK_ID_LEN);
+
+ /* Ethernet configuration. */
+#if defined(CONFIG_NET_VLAN)
+ if (vlan_enabled) {
+ hdr_vlan->vlan.tpid = htons(NET_ETH_PTYPE_VLAN);
+ hdr_vlan->vlan.tci = htons(net_pkt_vlan_tag(pkt));
+ hdr_vlan->type = htons(NET_ETH_PTYPE_PTP);
+ } else
+#endif
+ {
+ eth->type = htons(NET_ETH_PTYPE_PTP);
+ }
+
+ memcpy(&eth->dst.addr, &gptp_multicast_eth_addr,
+ sizeof(struct net_eth_addr));
+ memcpy(&eth->src.addr, net_if_get_link_addr(iface)->addr,
+ sizeof(struct net_eth_addr));
+
+ /* PTP configuration. 
*/ + follow_up->resp_orig_ts_secs_high = 0; + follow_up->resp_orig_ts_secs_low = 0; + follow_up->resp_orig_ts_nsecs = 0; + memcpy(&follow_up->requesting_port_id, &pdelay_resp->requesting_port_id, + sizeof(struct gptp_port_identity)); + + net_buf_add(frag, sizeof(struct gptp_hdr) + + sizeof(struct gptp_pdelay_resp_follow_up)); + + return pkt; + +fail: + if (pkt) { + net_pkt_unref(pkt); + } + + return NULL; +} + +struct net_pkt *gptp_prepare_announce(int port) +{ + int eth_len = sizeof(struct net_eth_hdr); + struct gptp_global_ds *global_ds; + struct gptp_port_ds *port_ds; + struct gptp_announce *ann; + struct net_eth_hdr *eth; + struct net_if *iface; + struct net_pkt *pkt; + struct net_buf *frag; + struct gptp_hdr *hdr; + +#if defined(CONFIG_NET_VLAN) + struct net_eth_vlan_hdr *hdr_vlan; + struct ethernet_context *eth_ctx; + bool vlan_enabled = false; +#endif + + NET_ASSERT((port >= GPTP_PORT_START) && (port <= GPTP_PORT_END)); + global_ds = GPTP_GLOBAL_DS(); + iface = GPTP_PORT_IFACE(port); + NET_ASSERT(iface); + +#if defined(CONFIG_NET_VLAN) + eth_ctx = net_if_l2_data(iface); + if (eth_ctx->vlan_enabled && + net_eth_get_vlan_tag(iface) != NET_VLAN_TAG_UNSPEC) { + eth_len = sizeof(struct net_eth_vlan_hdr); + vlan_enabled = true; + } +#endif + + pkt = net_pkt_get_reserve_tx(0, NET_BUF_TIMEOUT); + if (!pkt) { + goto fail; + } + + frag = net_pkt_get_reserve_tx_data(eth_len, NET_BUF_TIMEOUT); + if (!frag) { + goto fail; + } + + net_pkt_frag_add(pkt, frag); + net_pkt_set_iface(pkt, iface); + net_pkt_set_family(pkt, AF_INET); + net_pkt_set_ll_reserve(pkt, eth_len); + net_pkt_set_priority(pkt, NET_PRIORITY_IC); + +#if defined(CONFIG_NET_VLAN) + if (vlan_enabled) { + hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + } +#endif + + eth = NET_ETH_HDR(pkt); + hdr = GPTP_HDR(pkt); + ann = GPTP_ANNOUNCE(pkt); + port_ds = GPTP_PORT_DS(port); + + /* Ethernet configuration. 
 */
+#if defined(CONFIG_NET_VLAN)
+ if (vlan_enabled) {
+ hdr_vlan->vlan.tpid = htons(NET_ETH_PTYPE_VLAN);
+ hdr_vlan->vlan.tci = htons(net_eth_get_vlan_tag(iface));
+ hdr_vlan->type = htons(NET_ETH_PTYPE_PTP);
+ } else
+#endif
+ {
+ eth->type = htons(NET_ETH_PTYPE_PTP);
+ }
+
+ memcpy(&eth->src.addr, net_if_get_link_addr(iface)->addr,
+ sizeof(struct net_eth_addr));
+ memcpy(&eth->dst.addr, &gptp_multicast_eth_addr,
+ sizeof(struct net_eth_addr));
+
+ hdr->message_type = GPTP_ANNOUNCE_MESSAGE;
+ hdr->transport_specific = GPTP_TRANSPORT_802_1_AS;
+ hdr->ptp_version = GPTP_VERSION;
+
+ hdr->domain_number = 0;
+ hdr->flags.octets[0] = 0;
+ /* Copy leap61, leap59, current UTC offset valid, time traceable and
+ * frequency traceable flags.
+ */
+ hdr->flags.octets[1] =
+ global_ds->global_flags.octets[1] | GPTP_FLAG_PTP_TIMESCALE;
+
+ hdr->correction_field = 0;
+
+ memcpy(hdr->port_id.clk_id,
+ GPTP_DEFAULT_DS()->clk_id,
+ GPTP_CLOCK_ID_LEN);
+ hdr->port_id.port_number = htons(port);
+ hdr->control = GPTP_OTHER_CONTROL_VALUE;
+ hdr->log_msg_interval = port_ds->cur_log_announce_itv;
+
+ /* Clear reserved fields. */
+ hdr->reserved0 = 0;
+ hdr->reserved1 = 0;
+ hdr->reserved2 = 0;
+
+ ann->cur_utc_offset = global_ds->current_utc_offset;
+ ann->time_source = global_ds->time_source;
+
+ memcpy(&ann->root_system_id,
+ &GPTP_PORT_BMCA_DATA(port)->master_priority,
+ sizeof(struct gptp_root_system_identity));
+ ann->steps_removed = global_ds->master_steps_removed;
+ hdr->sequence_id = htons(port_ds->announce_seq_id);
+ port_ds->announce_seq_id++;
+
+ ann->tlv.type = GPTP_ANNOUNCE_MSG_PATH_SEQ_TYPE;
+
+ /* Clear reserved fields. 
*/ + memset(ann->reserved1, 0, sizeof(ann->reserved1)); + ann->reserved2 = 0; + + hdr->message_length = htons(sizeof(struct gptp_hdr) + + sizeof(struct gptp_announce) - 8 + + ntohs(global_ds->path_trace.len)); + + net_buf_add(frag, sizeof(struct gptp_hdr) + + sizeof(struct gptp_announce) - 8); + + ann->tlv.len = global_ds->path_trace.len; + + if (net_pkt_append(pkt, ntohs(global_ds->path_trace.len), + &global_ds->path_trace.path_sequence[0][0], + NET_BUF_TIMEOUT) < + ntohs(global_ds->path_trace.len)) { + goto fail; + } + + return pkt; + +fail: + if (pkt) { + net_pkt_unref(pkt); + } + + return NULL; +} + +void gptp_handle_sync(int port, struct net_pkt *pkt) +{ + struct gptp_sync_rcv_state *state; + struct gptp_port_ds *port_ds; + struct gptp_hdr *hdr; + u64_t upstream_sync_itv; + s32_t duration; + + state = &GPTP_PORT_STATE(port)->sync_rcv; + port_ds = GPTP_PORT_DS(port); + hdr = GPTP_HDR(state->rcvd_sync_ptr); + + upstream_sync_itv = NSEC_PER_SEC * GPTP_POW2(hdr->log_msg_interval); + + /* Convert ns to ms. */ + duration = (upstream_sync_itv / 1000000); + + /* Start timeout timer. 
*/ + k_timer_start(&state->follow_up_discard_timer, + duration, 0); +} + +int gptp_handle_follow_up(int port, struct net_pkt *pkt) +{ + struct gptp_sync_rcv_state *state; + struct gptp_hdr *sync_hdr, *hdr; + struct gptp_port_ds *port_ds; + + state = &GPTP_PORT_STATE(port)->sync_rcv; + port_ds = GPTP_PORT_DS(port); + + sync_hdr = GPTP_HDR(state->rcvd_sync_ptr); + hdr = GPTP_HDR(pkt); + + if (sync_hdr->sequence_id != hdr->sequence_id) { + NET_WARN("Follow up sequence id does not match sync"); + return -1; + } + + GPTP_STATS_INC(port, rx_fup_count); + + return 0; +} + +void gptp_handle_pdelay_req(int port, struct net_pkt *pkt) +{ + struct net_pkt *reply; + + GPTP_STATS_INC(port, rx_pdelay_req_count); + + /* Prepare response and send */ + reply = gptp_prepare_pdelay_resp(port, pkt); + if (reply) { + if (!ts_cb_registered) { + net_if_register_timestamp_cb( + &pdelay_response_timestamp_cb, + net_pkt_iface(pkt), + gptp_pdelay_response_timestamp_callback); + + ts_cb_registered = true; + } + + /* TS thread will send this back to us so increment ref count + * so that the packet is not removed when sending it. + * This will be unref'ed by timestamp callback in + * gptp_pdelay_response_timestamp_callback() + */ + net_pkt_ref(reply); + + gptp_send_pdelay_resp(port, reply, net_pkt_timestamp(pkt)); + } +} + +int gptp_handle_pdelay_resp(int port, struct net_pkt *pkt) +{ + struct gptp_pdelay_req_state *state; + struct gptp_default_ds *default_ds; + struct gptp_pdelay_resp *resp; + struct gptp_port_ds *port_ds; + struct net_eth_hdr *eth; + struct gptp_hdr *hdr, *req_hdr; + + eth = NET_ETH_HDR(pkt); + hdr = GPTP_HDR(pkt); + resp = GPTP_PDELAY_RESP(pkt); + state = &GPTP_PORT_STATE(port)->pdelay_req; + port_ds = GPTP_PORT_DS(port); + default_ds = GPTP_DEFAULT_DS(); + + if (!state->tx_pdelay_req_ptr) { + goto reset; + } + + req_hdr = GPTP_HDR(state->tx_pdelay_req_ptr); + + /* Check clock identity. 
*/ + if (memcmp(default_ds->clk_id, resp->requesting_port_id.clk_id, + GPTP_CLOCK_ID_LEN)) { + NET_WARN("Requesting Clock Identity does not match"); + goto reset; + } + if (memcmp(default_ds->clk_id, hdr->port_id.clk_id, + GPTP_CLOCK_ID_LEN) == 0) { + NET_WARN("Source Clock Identity is local Clock Identity"); + goto reset; + } + + /* Check port number. */ + if (resp->requesting_port_id.port_number != htons(port)) { + NET_WARN("Requesting Port Number does not match"); + goto reset; + } + + /* Check sequence id. */ + if (hdr->sequence_id != req_hdr->sequence_id) { + NET_WARN("Sequence Id does not match"); + goto reset; + } + + GPTP_STATS_INC(port, rx_pdelay_resp_count); + + return 0; + +reset: + return -1; +} + +int gptp_handle_pdelay_follow_up(int port, struct net_pkt *pkt) +{ + struct gptp_pdelay_resp_follow_up *follow_up; + struct gptp_hdr *hdr, *req_hdr, *resp_hdr; + struct gptp_pdelay_req_state *state; + struct gptp_default_ds *default_ds; + struct gptp_port_ds *port_ds; + struct net_eth_hdr *eth; + + eth = NET_ETH_HDR(pkt); + hdr = GPTP_HDR(pkt); + follow_up = GPTP_PDELAY_RESP_FOLLOWUP(pkt); + state = &GPTP_PORT_STATE(port)->pdelay_req; + port_ds = GPTP_PORT_DS(port); + default_ds = GPTP_DEFAULT_DS(); + + if (!state->tx_pdelay_req_ptr) { + goto reset; + } + + req_hdr = GPTP_HDR(state->tx_pdelay_req_ptr); + + if (!state->rcvd_pdelay_resp_ptr) { + goto reset; + } + + resp_hdr = GPTP_HDR(state->rcvd_pdelay_resp_ptr); + + /* Check clock identity. */ + if (memcmp(default_ds->clk_id, follow_up->requesting_port_id.clk_id, + GPTP_CLOCK_ID_LEN)) { + NET_WARN("Requesting Clock Identity does not match"); + goto reset; + } + + if (memcmp(default_ds->clk_id, hdr->port_id.clk_id, + GPTP_CLOCK_ID_LEN) == 0) { + NET_WARN("Source Clock Identity is local Clock Identity"); + goto reset; + } + + /* Check port number. */ + if (follow_up->requesting_port_id.port_number != htons(port)) { + NET_WARN("Requesting Port Number does not match"); + goto reset; + } + + /* Check sequence id. 
*/ + if (hdr->sequence_id != req_hdr->sequence_id) { + NET_WARN("Sequence ID does not match"); + goto reset; + } + + /* Check source port. */ + if (memcmp(&hdr->port_id, &resp_hdr->port_id, + sizeof(hdr->port_id)) != 0) { + NET_WARN("pDelay response and follow up port IDs do not match"); + goto reset; + } + + GPTP_STATS_INC(port, rx_fup_count); + + return 0; + +reset: + return -1; +} + +void gptp_handle_signaling(int port, struct net_pkt *pkt) +{ + struct gptp_port_ds *port_ds; + struct gptp_signaling *sig; + + sig = GPTP_SIGNALING(pkt); + port_ds = GPTP_PORT_DS(port); + + /* If time-synchronization not enabled, drop packet. */ + if (!port_ds->ptt_port_enabled) { + return; + } + + /* pDelay interval. */ + gptp_update_pdelay_req_interval(port, sig->tlv.link_delay_itv); + + /* Sync interval. */ + gptp_update_sync_interval(port, sig->tlv.time_sync_itv); + + /* Announce interval. */ + gptp_update_announce_interval(port, sig->tlv.announce_itv); + + port_ds->compute_neighbor_rate_ratio = + sig->tlv.compute_neighbor_rate_ratio; + port_ds->compute_neighbor_prop_delay = + sig->tlv.compute_neighbor_prop_delay; +} + +void gptp_send_sync(int port, struct net_pkt *pkt) +{ + if (sync_cb_registered) { + net_if_register_timestamp_cb(&sync_timestamp_cb, + net_pkt_iface(pkt), + gptp_sync_timestamp_callback); + sync_cb_registered = true; + } + + GPTP_STATS_INC(port, tx_sync_count); + + /* TS thread will send this back to us so increment ref count + * so that the packet is not removed when sending it. 
+ * This will be unref'ed by timestamp callback in + * gptp_sync_timestamp_callback() + */ + net_pkt_ref(pkt); + + PRINT_INFO("SYNC", pkt); + + net_if_queue_tx(net_pkt_iface(pkt), pkt); +} + +void gptp_send_follow_up(int port, struct net_pkt *pkt) +{ + GPTP_STATS_INC(port, tx_fup_count); + + PRINT_INFO("FOLLOWUP", pkt); + + net_if_queue_tx(net_pkt_iface(pkt), pkt); +} + +void gptp_send_announce(int port, struct net_pkt *pkt) +{ + GPTP_STATS_INC(port, tx_announce_count); + + PRINT_INFO("ANNOUNCE", pkt); + + net_if_queue_tx(net_pkt_iface(pkt), pkt); +} + +void gptp_send_pdelay_req(int port) +{ + struct gptp_pdelay_req_state *state; + struct gptp_port_ds *port_ds; + struct net_pkt *pkt; + + NET_ASSERT((port >= GPTP_PORT_START) && (port <= GPTP_PORT_END)); + state = &GPTP_PORT_STATE(port)->pdelay_req; + port_ds = GPTP_PORT_DS(port); + + pkt = gptp_prepare_pdelay_req(port); + if (pkt) { + /* Keep the buffer alive until pdelay_rate_ratio is computed. */ + state->tx_pdelay_req_ptr = net_pkt_ref(pkt); + + GPTP_STATS_INC(port, tx_pdelay_req_count); + + PRINT_INFO("PATH_DELAY_REQ", pkt); + + net_if_queue_tx(net_pkt_iface(pkt), pkt); + } else { + NET_ERR("Failed to prepare pdelay request"); + } +} + +void gptp_send_pdelay_resp(int port, struct net_pkt *pkt, + struct net_ptp_time *treq) +{ + struct gptp_pdelay_resp *resp; + struct gptp_hdr *hdr; + + hdr = GPTP_HDR(pkt); + + /* No Fractional nsec .*/ + hdr->correction_field = 0; + + resp = GPTP_PDELAY_RESP(pkt); + resp->req_receipt_ts_secs_high = htons(treq->_sec.high); + resp->req_receipt_ts_secs_low = htonl(treq->_sec.low); + resp->req_receipt_ts_nsecs = htonl(treq->nanosecond); + + GPTP_STATS_INC(port, tx_pdelay_resp_count); + + PRINT_INFO("PATH_DELAY_RESP", pkt); + + net_if_queue_tx(net_pkt_iface(pkt), pkt); +} + +void gptp_send_pdelay_follow_up(int port, struct net_pkt *pkt, + struct net_ptp_time *tresp) +{ + struct gptp_pdelay_resp_follow_up *follow_up; + struct gptp_hdr *hdr; + + hdr = GPTP_HDR(pkt); + + /* No Fractional 
nsec .*/ + hdr->correction_field = 0; + + follow_up = GPTP_PDELAY_RESP_FOLLOWUP(pkt); + follow_up->resp_orig_ts_secs_high = htons(tresp->_sec.high); + follow_up->resp_orig_ts_secs_low = htonl(tresp->_sec.low); + follow_up->resp_orig_ts_nsecs = htonl(tresp->nanosecond); + + GPTP_STATS_INC(port, tx_pdelay_resp_fup_count); + + PRINT_INFO("PATH_DELAY_FOLLOWUP", pkt); + + net_if_queue_tx(net_pkt_iface(pkt), pkt); +} diff --git a/subsys/net/ip/l2/gptp/gptp_mi.c b/subsys/net/ip/l2/gptp/gptp_mi.c new file mode 100644 index 0000000000000..9a473a71a8acc --- /dev/null +++ b/subsys/net/ip/l2/gptp/gptp_mi.c @@ -0,0 +1,1494 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_GPTP) +#define SYS_LOG_DOMAIN "net/gptp" +#define NET_LOG_ENABLED 1 +#endif + +#include +#include +#include + +#include "gptp_private.h" + +#include + +static void gptp_mi_half_sync_itv_timeout(struct k_timer *timer) +{ + struct gptp_pss_send_state *state; + int port; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + state = &GPTP_PORT_STATE(port)->pss_send; + if (timer == &state->half_sync_itv_timer) { + if (!state->half_sync_itv_timer_expired) { + state->half_sync_itv_timer_expired = true; + } else { + /* We do not need the timer anymore. 
*/ + k_timer_stop(timer); + + state->sync_itv_timer_expired = true; + } + } + } +} + +static void gptp_mi_sync_receipt_timeout(struct k_timer *timer) +{ + struct gptp_pss_send_state *state_send; + struct gptp_pss_rcv_state *state_rcv; + int port; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + state_send = &GPTP_PORT_STATE(port)->pss_send; + if (timer == &state_send->sync_receipt_timeout_timer) { + state_send->sync_receipt_timeout_timer_expired = true; + } + + state_rcv = &GPTP_PORT_STATE(port)->pss_rcv; + if (timer == &state_rcv->sync_receipt_timeout_timer) { + state_rcv->sync_receipt_timeout_timer_expired = true; + } + + GPTP_STATS_INC(port, sync_receipt_timeout_count); + } +} + +static void gptp_mi_init_port_sync_sync_rcv_sm(int port) +{ + struct gptp_pss_rcv_state *pss_rcv; + + pss_rcv = &GPTP_PORT_STATE(port)->pss_rcv; + memset(pss_rcv, 0, sizeof(struct gptp_pss_rcv_state)); + pss_rcv->state = GPTP_PSS_RCV_DISCARD; + + k_timer_init(&pss_rcv->sync_receipt_timeout_timer, + gptp_mi_sync_receipt_timeout, NULL); + +} + +static void gptp_mi_init_port_sync_sync_send_sm(int port) +{ + struct gptp_pss_send_state *pss_send; + + pss_send = &GPTP_PORT_STATE(port)->pss_send; + memset(pss_send, 0, sizeof(struct gptp_pss_send_state)); + + k_timer_init(&pss_send->half_sync_itv_timer, + gptp_mi_half_sync_itv_timeout, NULL); + k_timer_init(&pss_send->sync_receipt_timeout_timer, + gptp_mi_sync_receipt_timeout, NULL); + + pss_send->state = GPTP_PSS_SEND_TRANSMIT_INIT; +} + +static void gptp_mi_init_site_sync_sync_sm(void) +{ + struct gptp_site_sync_sync_state *site_ss; + + site_ss = &GPTP_STATE()->site_ss; + memset(site_ss, 0, sizeof(struct gptp_site_sync_sync_state)); + site_ss->state = GPTP_SSS_INITIALIZING; +} + +static void gptp_mi_init_clock_slave_sync_sm(void) +{ + struct gptp_clk_slave_sync_state *clk_ss; + + clk_ss = &GPTP_STATE()->clk_slave_sync; + memset(clk_ss, 0, sizeof(struct gptp_clk_slave_sync_state)); + clk_ss->state = 
GPTP_CLK_SLAVE_SYNC_INITIALIZING; +} + +static void gptp_mi_init_port_announce_rcv_sm(int port) +{ + struct gptp_port_announce_receive_state *pa_rcv; + + pa_rcv = &GPTP_PORT_STATE(port)->pa_rcv; + memset(pa_rcv, 0, sizeof(struct gptp_port_announce_receive_state)); + pa_rcv->state = GPTP_PA_RCV_DISCARD; + +} + +static void gptp_mi_init_clock_master_sync_rcv_sm(void) +{ + struct gptp_clk_master_sync_state *cms_rcv; + + cms_rcv = &GPTP_STATE()->clk_master_sync_receive; + memset(cms_rcv, 0, sizeof(struct gptp_clk_master_sync_state)); + cms_rcv->state = GPTP_CMS_RCV_INITIALIZING; +} + +static void announce_timer_handler(struct k_timer *timer) +{ + int port; + struct gptp_port_announce_information_state *state; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + state = &GPTP_PORT_STATE(port)->pa_info; + if (timer == &state->ann_rcpt_expiry_timer) { + state->ann_expired = true; + GPTP_STATS_INC(port, announce_receipt_timeout_count); + break; + } + } +} + +static void gptp_mi_init_port_announce_info_sm(int port) +{ + struct gptp_port_announce_information_state *state; + + state = &GPTP_PORT_STATE(port)->pa_info; + + k_timer_init(&state->ann_rcpt_expiry_timer, + announce_timer_handler, NULL); + + state->ann_expired = false; + state->state = GPTP_PA_INFO_DISABLED; +} + +static void gptp_mi_init_bmca_data(int port) +{ + struct gptp_port_bmca_data *bmca_data; + + bmca_data = GPTP_PORT_BMCA_DATA(port); + + memset(bmca_data, 0, sizeof(struct gptp_port_bmca_data)); + + gptp_set_time_itv(&bmca_data->announce_interval, 1, + CONFIG_NET_GPTP_INIT_LOG_ANNOUNCE_ITV); + + memset(&bmca_data->port_priority, 0xFF, + sizeof(struct gptp_priority_vector)); + memset(&bmca_data->master_priority, 0xFF, + sizeof(struct gptp_priority_vector)); +} + +static void announce_periodic_timer_handler(struct k_timer *timer) +{ + int port; + struct gptp_port_announce_transmit_state *state; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + state = 
&GPTP_PORT_STATE(port)->pa_transmit; + if (timer == &state->ann_send_periodic_timer) { + state->ann_trigger = true; + break; + } + } +} + +static void gptp_mi_init_port_announce_transmit_sm(int port) +{ + struct gptp_port_announce_transmit_state *state; + + state = &GPTP_PORT_STATE(port)->pa_transmit; + + k_timer_init(&state->ann_send_periodic_timer, + announce_periodic_timer_handler, NULL); + + state->ann_trigger = false; + state->state = GPTP_PA_TRANSMIT_INIT; +} + +static void gptp_mi_init_port_role_selection_sm(void) +{ + GPTP_STATE()->pr_sel.state = GPTP_PR_SELECTION_INIT_BRIDGE; +} + +void gptp_mi_init_state_machine(void) +{ + int port; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + gptp_mi_init_port_sync_sync_rcv_sm(port); + gptp_mi_init_port_sync_sync_send_sm(port); + gptp_mi_init_port_announce_rcv_sm(port); + gptp_mi_init_port_announce_info_sm(port); + gptp_mi_init_port_announce_transmit_sm(port); + gptp_mi_init_bmca_data(port); + } + + gptp_mi_init_site_sync_sync_sm(); + gptp_mi_init_clock_slave_sync_sm(); + gptp_mi_init_port_role_selection_sm(); + gptp_mi_init_clock_master_sync_rcv_sm(); +} + + +static void gptp_mi_pss_rcv_compute(int port) +{ + struct gptp_pss_rcv_state *state; + struct gptp_mi_port_sync_sync *pss; + struct gptp_md_sync_info *sync_rcv; + struct gptp_port_ds *port_ds; + + state = &GPTP_PORT_STATE(port)->pss_rcv; + pss = &state->pss; + sync_rcv = &state->sync_rcv; + port_ds = GPTP_PORT_DS(port); + + state->rate_ratio = sync_rcv->rate_ratio; + state->rate_ratio += (port_ds->neighbor_rate_ratio - 1.0); + + port_ds->sync_receipt_timeout_time_itv = port_ds->sync_receipt_timeout; + port_ds->sync_receipt_timeout_time_itv *= NSEC_PER_SEC; + port_ds->sync_receipt_timeout_time_itv *= + GPTP_POW2(16 + sync_rcv->log_msg_interval); + + pss->local_port_number = port; + + memcpy(&pss->sync_info, sync_rcv, sizeof(struct gptp_md_sync_info)); + + pss->sync_receipt_timeout_time = GPTP_GET_CURRENT_TIME_NANOSECOND(); + 
pss->sync_receipt_timeout_time += + (port_ds->sync_receipt_timeout_time_itv >> 16); + + pss->sync_info.rate_ratio = state->rate_ratio; +} + +static void gptp_mi_pss_rcv_state_machine(int port) +{ + struct gptp_pss_rcv_state *state; + struct gptp_site_sync_sync_state *site_ss_state; + struct gptp_port_ds *port_ds; + s32_t duration; + + state = &GPTP_PORT_STATE(port)->pss_rcv; + site_ss_state = &GPTP_STATE()->site_ss; + port_ds = GPTP_PORT_DS(port); + + if ((!port_ds->ptt_port_enabled) || !port_ds->as_capable) { + state->rcvd_md_sync = false; + state->state = GPTP_PSS_RCV_DISCARD; + return; + } + + switch (state->state) { + case GPTP_PSS_RCV_DISCARD: + k_timer_stop(&state->sync_receipt_timeout_timer); + state->sync_receipt_timeout_timer_expired = false; + case GPTP_PSS_RCV_RECEIVED_SYNC: + if (state->rcvd_md_sync) { + state->rcvd_md_sync = false; + gptp_mi_pss_rcv_compute(port); + + state->state = GPTP_PSS_RCV_RECEIVED_SYNC; + + site_ss_state->pss_rcv_ptr = &state->pss; + site_ss_state->rcvd_pss = true; + + k_timer_stop(&state->sync_receipt_timeout_timer); + state->sync_receipt_timeout_timer_expired = false; + + if (GPTP_GLOBAL_DS()->gm_present) { + duration = + (port_ds->sync_receipt_timeout_time_itv + >> 16) / (NSEC_PER_SEC/MSEC_PER_SEC); + k_timer_start(&state-> + sync_receipt_timeout_timer, + duration, + 0); + } + } + + break; + } +} + +static void gptp_mi_pss_store_last_pss(int port) +{ + struct gptp_pss_send_state *state; + struct gptp_mi_port_sync_sync *pss_ptr; + struct gptp_md_sync_info *sync_info; + struct gptp_port_ds *port_ds; + + state = &GPTP_PORT_STATE(port)->pss_send; + port_ds = GPTP_PORT_DS(port); + pss_ptr = state->pss_sync_ptr; + sync_info = &pss_ptr->sync_info; + + state->last_rcvd_port_num = pss_ptr->local_port_number; + + memcpy(&state->last_precise_orig_ts, &sync_info->precise_orig_ts, + sizeof(struct net_ptp_time)); + memcpy(&state->last_gm_phase_change, &sync_info->last_gm_phase_change, + sizeof(struct gptp_scaled_ns)); + + 
state->last_follow_up_correction_field = + sync_info->follow_up_correction_field; + state->last_rate_ratio = sync_info->rate_ratio; + state->last_upstream_tx_time = sync_info->upstream_tx_time; + state->last_gm_time_base_indicator = sync_info->gm_time_base_indicator; + state->last_gm_freq_change = sync_info->last_gm_freq_change; +} + +static void gptp_mi_pss_send_md_sync_send(int port) +{ + struct gptp_pss_send_state *state; + struct gptp_mi_port_sync_sync *pss_ptr; + struct gptp_port_ds *port_ds; + struct gptp_sync_send_state *sync_send; + + state = &GPTP_PORT_STATE(port)->pss_send; + port_ds = GPTP_PORT_DS(port); + pss_ptr = state->pss_sync_ptr; + sync_send = &GPTP_PORT_STATE(port)->sync_send; + + memcpy(&state->sync_send, &pss_ptr->sync_info, + sizeof(struct gptp_md_sync_info)); + + sync_send->sync_send_ptr = &state->sync_send; + sync_send->rcvd_md_sync = true; +} + +static void gptp_mi_pss_send_state_machine(int port) +{ + struct gptp_pss_send_state *state; + struct gptp_port_ds *port_ds; + struct gptp_global_ds *global_ds; + s32_t duration; + + global_ds = GPTP_GLOBAL_DS(); + state = &GPTP_PORT_STATE(port)->pss_send; + port_ds = GPTP_PORT_DS(port); + + /* Reset interval as defined in LinkDelaySyncIntervalSetting state + * machine. + */ + if (port_ds->ptt_port_enabled && !port_ds->prev_ptt_port_enabled) { + gptp_update_sync_interval(port, GPTP_ITV_SET_TO_INIT); + } + + if (state->rcvd_pss_sync && ((!port_ds->ptt_port_enabled) || + !port_ds->as_capable)) { + state->rcvd_pss_sync = false; + state->state = GPTP_PSS_SEND_TRANSMIT_INIT; + + return; + } + + switch (state->state) { + case GPTP_PSS_SEND_TRANSMIT_INIT: + case GPTP_PSS_SEND_SYNC_RECEIPT_TIMEOUT: + if (state->rcvd_pss_sync && + (state->pss_sync_ptr->local_port_number != port) && + (global_ds->selected_role[port] == GPTP_PORT_MASTER)) { + state->state = GPTP_PSS_SEND_SEND_MD_SYNC; + } else { + break; + } + /* Fallthrough. 
*/ + case GPTP_PSS_SEND_SEND_MD_SYNC: + if (state->rcvd_pss_sync) { + gptp_mi_pss_store_last_pss(port); + state->rcvd_pss_sync = false; + } + + /* Make sure no previous timer is still running. */ + k_timer_stop(&state->half_sync_itv_timer); + k_timer_stop(&state->sync_receipt_timeout_timer); + + state->half_sync_itv_timer_expired = false; + state->sync_itv_timer_expired = false; + state->sync_receipt_timeout_timer_expired = false; + + /* Convert ns to ms. */ + duration = gptp_uscaled_ns_to_timer_ms( + &port_ds->half_sync_itv); + + /* Start 0.5 * syncInterval timeout timer. */ + k_timer_start(&state->half_sync_itv_timer, + duration, duration); + + gptp_mi_pss_send_md_sync_send(port); + + /* Fallthrough. */ + case GPTP_PSS_SEND_SET_SYNC_RECEIPT_TIMEOUT: + /* Test conditions have been slightly rearranged compared to + * their definitions in the standard in order not to test + * AsCapable and pttPortEnabled when not needed (they are + * already tested with rcvdPSSync for the reset of this state + * machine). + */ + if ((global_ds->selected_role[port] == GPTP_PORT_MASTER) && + ((state->rcvd_pss_sync && + state->half_sync_itv_timer_expired && + state->pss_sync_ptr->local_port_number != port) || + (state->sync_itv_timer_expired && + (state->last_rcvd_port_num != port) && + port_ds->as_capable && port_ds->ptt_port_enabled))) { + + state->state = GPTP_PSS_SEND_SEND_MD_SYNC; + + } else if ((state->state == GPTP_PSS_SEND_SEND_MD_SYNC) || + (state->rcvd_pss_sync && + !state->sync_itv_timer_expired && + (global_ds->selected_role[port] == + GPTP_PORT_MASTER) && + state->pss_sync_ptr->local_port_number != port)) { + /* Change state as it may have transitioned from + * SEND_MD_SYNC. + */ + state->state = GPTP_PSS_SEND_SET_SYNC_RECEIPT_TIMEOUT; + + /* Stop and (re)start receipt timeout timer. 
*/ + k_timer_stop(&state->sync_receipt_timeout_timer); + state->sync_receipt_timeout_timer_expired = false; + + duration = (state->last_sync_receipt_timeout_time - + GPTP_GET_CURRENT_TIME_NANOSECOND()) / + ((NSEC_PER_USEC) * (USEC_PER_MSEC)); + + k_timer_start(&state->sync_receipt_timeout_timer, + duration, 0); + + } else if (state->sync_receipt_timeout_timer_expired) { + state->state = GPTP_PSS_SEND_SYNC_RECEIPT_TIMEOUT; + } + break; + + } +} + +static void gptp_mi_site_ss_prepare_pss_send(void) +{ + struct gptp_site_sync_sync_state *state; + + state = &GPTP_STATE()->site_ss; + + memcpy(&state->pss_send, state->pss_rcv_ptr, + sizeof(struct gptp_mi_port_sync_sync)); +} + +static void gptp_mi_site_ss_send_to_pss(void) +{ + struct gptp_site_sync_sync_state *state; + struct gptp_pss_send_state *pss_send; + int port; + + state = &GPTP_STATE()->site_ss; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + pss_send = &GPTP_PORT_STATE(port)->pss_send; + pss_send->pss_sync_ptr = &state->pss_send; + pss_send->rcvd_pss_sync = true; + } +} + +static void gptp_mi_site_sync_sync_state_machine(void) +{ + bool gm_present; + u16_t local_port_number; + struct gptp_site_sync_sync_state *state; + struct gptp_clk_slave_sync_state *clk_ss; + + state = &GPTP_STATE()->site_ss; + clk_ss = &GPTP_STATE()->clk_slave_sync; + gm_present = GPTP_GLOBAL_DS()->gm_present; + + if (!state->pss_rcv_ptr) { + /* We do not have connection to GM yet */ + return; + } + + local_port_number = state->pss_rcv_ptr->local_port_number; + + switch (state->state) { + case GPTP_SSS_INITIALIZING: + state->rcvd_pss = false; + state->state = GPTP_SSS_RECEIVING_SYNC; + break; + + case GPTP_SSS_RECEIVING_SYNC: + if (state->rcvd_pss) { + state->rcvd_pss = false; + if (gptp_is_slave_port(local_port_number) && + gm_present) { + gptp_mi_site_ss_prepare_pss_send(); + + /* + * Send Port Sync Sync to all + * PortSyncSyncSend State Machines. 
+ */ + gptp_mi_site_ss_send_to_pss(); + + /* + * Send PortSyncSync to + * ClockSlaveSync State Machine. + */ + clk_ss->pss_rcv_ptr = &state->pss_send; + clk_ss->rcvd_pss = true; + } + } + + break; + } +} + +static void gptp_mi_clk_slave_sync_compute(void) +{ + struct gptp_clk_slave_sync_state *state; + struct gptp_global_ds *global_ds; + struct gptp_md_sync_info *pss; + struct gptp_port_ds *port_ds; + u64_t sync_receipt_time; + + state = &GPTP_STATE()->clk_slave_sync; + global_ds = GPTP_GLOBAL_DS(); + port_ds = GPTP_PORT_DS(state->pss_rcv_ptr->local_port_number); + + pss = &state->pss_rcv_ptr->sync_info; + + sync_receipt_time = pss->rate_ratio; + sync_receipt_time /= port_ds->neighbor_rate_ratio; + sync_receipt_time *= port_ds->neighbor_prop_delay; + sync_receipt_time += pss->follow_up_correction_field; + sync_receipt_time += port_ds->delay_asymmetry; + + global_ds->sync_receipt_time.second = sync_receipt_time / NSEC_PER_SEC; + global_ds->sync_receipt_time.nanosecond = + sync_receipt_time % NSEC_PER_SEC; + global_ds->sync_receipt_time.second += pss->precise_orig_ts.second; + global_ds->sync_receipt_time.nanosecond += + pss->precise_orig_ts.nanosecond; + + global_ds->sync_receipt_local_time = port_ds->delay_asymmetry; + global_ds->sync_receipt_local_time /= pss->rate_ratio; + global_ds->sync_receipt_local_time += + (port_ds->neighbor_prop_delay/port_ds->neighbor_rate_ratio); + global_ds->sync_receipt_local_time += pss->upstream_tx_time; + + global_ds->gm_time_base_indicator = pss->gm_time_base_indicator; + global_ds->last_gm_phase_change.high = pss->last_gm_phase_change.high; + global_ds->last_gm_phase_change.low = pss->last_gm_phase_change.low; + global_ds->last_gm_freq_change = pss->last_gm_freq_change; +} + +#if defined(CONFIG_NET_GPTP_USE_DEFAULT_CLOCK_UPDATE) +static void gptp_update_local_port_clock(void) +{ + struct gptp_clk_slave_sync_state *state; + struct gptp_global_ds *global_ds; + struct gptp_port_ds *port_ds; + int port; + s64_t nanosecond_diff; + 
s64_t second_diff; + struct ptp_clock *clk; + struct net_ptp_time tm; + int key; + + state = &GPTP_STATE()->clk_slave_sync; + global_ds = GPTP_GLOBAL_DS(); + port = state->pss_rcv_ptr->local_port_number; + NET_ASSERT((port >= GPTP_PORT_START) && (port <= GPTP_PORT_END)); + + port_ds = GPTP_PORT_DS(port); + + second_diff = global_ds->sync_receipt_time.second - + (global_ds->sync_receipt_local_time / NSEC_PER_SEC); + nanosecond_diff = global_ds->sync_receipt_time.nanosecond - + (global_ds->sync_receipt_local_time % NSEC_PER_SEC); + + clk = ptp_clock_lookup_by_dev(GPTP_PORT_DRV(port)); + if (!clk) { + return; + } + + ptp_clock_rate_adjust(clk, port_ds->neighbor_rate_ratio); + + /* If time difference is too high, set the clock value. + * Otherwise, adjust it. + */ + if (second_diff || (second_diff == 0 && + (nanosecond_diff < -5000 || + nanosecond_diff > 5000))) { + key = irq_lock(); + ptp_clock_get(clk, &tm); + tm.second += second_diff; + tm.nanosecond += nanosecond_diff; + if (tm.nanosecond < 0) { + tm.second--; + tm.nanosecond += NSEC_PER_SEC; + } else if (tm.nanosecond >= NSEC_PER_SEC) { + tm.second++; + tm.nanosecond -= NSEC_PER_SEC; + } + + ptp_clock_set(clk, &tm); + irq_unlock(key); + } else { + if (nanosecond_diff < -200) { + nanosecond_diff = -200; + } else if (nanosecond_diff > 200) { + nanosecond_diff = 200; + } + + ptp_clock_adjust(clk, nanosecond_diff); + } +} +#endif /* CONFIG_NET_GPTP_USE_DEFAULT_CLOCK_UPDATE */ + +static void gptp_mi_clk_slave_sync_state_machine(void) +{ + struct gptp_clk_slave_sync_state *state; + + state = &GPTP_STATE()->clk_slave_sync; + + switch (state->state) { + case GPTP_CLK_SLAVE_SYNC_INITIALIZING: + state->rcvd_pss = false; + state->state = GPTP_CLK_SLAVE_SYNC_SEND_SYNC_IND; + break; + + case GPTP_CLK_SLAVE_SYNC_SEND_SYNC_IND: + if (state->rcvd_pss) { + state->rcvd_pss = false; + gptp_mi_clk_slave_sync_compute(); + +#if defined(CONFIG_NET_GPTP_USE_DEFAULT_CLOCK_UPDATE) + /* Instead of updating SlaveClock, update LocalClock */ + 
gptp_update_local_port_clock(); +#endif + gptp_call_phase_dis_cb(); + } + break; + } +} + +static void gptp_mi_clk_master_sync_rcv_state_machine(void) +{ + struct gptp_clk_master_sync_state *state; + + state = &GPTP_STATE()->clk_master_sync_receive; + switch (state->state) { + case GPTP_CMS_RCV_INITIALIZING: + state->state = GPTP_CMS_RCV_WAITING; + break; + + case GPTP_CMS_RCV_WAITING: + if (state->rcvd_clock_source_req || + state->rcvd_local_clock_tick) { + state->state = GPTP_CMS_RCV_SOURCE_TIME; + } + + break; + + case GPTP_CMS_RCV_SOURCE_TIME: + /* TODO: updateMasterTime(); */ + /*localTime = currentTime;*/ + if (state->rcvd_clock_source_req) { + /* TODO: + computeGMRateRatio(); + Update: + clockSourceTimeBaseIndicatorOld; + clockSourceTimeBaseIndicator + clockSourceLastGmPhaseChange + clockSourceLastGmFreqChange*/ + } + + state->rcvd_clock_source_req = false; + state->rcvd_local_clock_tick = false; + state->state = GPTP_CMS_RCV_WAITING; + break; + + default: + NET_ERR("Unrecognised state"); + break; + } +} + +static void copy_path_trace(struct gptp_announce *announce) +{ + struct gptp_path_trace *sys_path_trace; + int len = ntohs(announce->tlv.len); + + sys_path_trace = &GPTP_GLOBAL_DS()->path_trace; + + sys_path_trace->len = htons(len + GPTP_CLOCK_ID_LEN); + memcpy(sys_path_trace->path_sequence, announce->tlv.path_sequence, + len); + + /* Append local clockIdentity. 
*/ + memcpy((u8_t *)sys_path_trace->path_sequence + len, + GPTP_DEFAULT_DS()->clk_id, GPTP_CLOCK_ID_LEN); +} + +static bool gptp_mi_qualify_announce(int port, struct net_pkt *announce_msg) +{ + struct gptp_announce *announce; + struct gptp_hdr *hdr; + int i; + u16_t len; + + hdr = GPTP_HDR(announce_msg); + announce = GPTP_ANNOUNCE(announce_msg); + + if (memcmp(hdr->port_id.clk_id, GPTP_DEFAULT_DS()->clk_id, + GPTP_CLOCK_ID_LEN) == 0) { + return false; + } + + len = ntohs(announce->steps_removed); + if (len >= 255) { + return false; + } + + for (i = 0; i < len + 1; i++) { + if (memcmp(announce->tlv.path_sequence[i], + GPTP_DEFAULT_DS()->clk_id, + GPTP_CLOCK_ID_LEN) == 0) { + return false; + } + } + + if (GPTP_GLOBAL_DS()->selected_role[port] == GPTP_PORT_SLAVE) { + copy_path_trace(announce); + } + + return true; +} + +static void gptp_mi_port_announce_receive_state_machine(int port) +{ + struct gptp_port_ds *port_ds; + struct gptp_port_announce_receive_state *state; + struct gptp_port_bmca_data *bmca_data; + + state = &GPTP_PORT_STATE(port)->pa_rcv; + port_ds = GPTP_PORT_DS(port); + bmca_data = GPTP_PORT_BMCA_DATA(port); + + if ((!port_ds->ptt_port_enabled) || (!port_ds->as_capable)) { + state->state = GPTP_PA_RCV_DISCARD; + } + + switch (state->state) { + case GPTP_PA_RCV_DISCARD: + state->rcvd_announce = false; + bmca_data->rcvd_msg = false; + if (bmca_data->rcvd_announce_ptr != NULL) { + net_pkt_unref(bmca_data->rcvd_announce_ptr); + bmca_data->rcvd_announce_ptr = NULL; + } + + state->state = GPTP_PA_RCV_RECEIVE; + break; + + case GPTP_PA_RCV_RECEIVE: + /* "portEnabled" is not checked: the interface is always up. 
*/ + if (state->rcvd_announce + && port_ds->ptt_port_enabled + && port_ds->as_capable + && !bmca_data->rcvd_msg) { + state->rcvd_announce = false; + bmca_data->rcvd_msg = + gptp_mi_qualify_announce(port, + bmca_data->rcvd_announce_ptr); + if (!bmca_data->rcvd_msg) { + net_pkt_unref(bmca_data->rcvd_announce_ptr); + bmca_data->rcvd_announce_ptr = NULL; + } + } + + break; + } +} + +/* + * Compare a vector to an announce message vector. + * All must be in big endian (network) order. + */ +static enum gptp_received_info compare_priority_vectors( + struct gptp_priority_vector *vector, + struct net_pkt *pkt, int port) +{ + struct gptp_hdr *hdr; + struct gptp_announce *announce; + struct gptp_port_bmca_data *bmca_data; + int rsi_cmp, spi_cmp, port_cmp; + + bmca_data = GPTP_PORT_BMCA_DATA(port); + hdr = GPTP_HDR(pkt); + announce = GPTP_ANNOUNCE(pkt); + + /* Compare rootSystemIdentity and stepsRemoved. */ + rsi_cmp = memcmp(&announce->root_system_id, + &vector->root_system_id, + sizeof(struct gptp_root_system_identity) + + sizeof(u16_t)); + if (rsi_cmp < 0) { + /* Better rootSystemIdentity. */ + return GPTP_RCVD_INFO_SUPERIOR_MASTER_INFO; + } + + /* Compare sourcePortIdentity. */ + spi_cmp = memcmp(&hdr->port_id, &vector->src_port_id, + sizeof(struct gptp_port_identity)); + + port_cmp = (int)port - ntohs(vector->port_number); + + if (spi_cmp == 0) { + if (rsi_cmp == 0) { + if (port_cmp == 0) { + /* Same priority vector. */ + return GPTP_RCVD_INFO_REPEATED_MASTER_INFO; + } else if (port_cmp < 0) { + /* Priority vector with better reception port + * number. + */ + return GPTP_RCVD_INFO_SUPERIOR_MASTER_INFO; + } + } else { + /* Same master port but different Grand Master. */ + return GPTP_RCVD_INFO_SUPERIOR_MASTER_INFO; + } + } else if ((spi_cmp < 0) && (rsi_cmp == 0)) { + /* Same Grand Master but better masterPort. 
*/ + return GPTP_RCVD_INFO_SUPERIOR_MASTER_INFO; + } + + return GPTP_RCVD_INFO_INFERIOR_MASTER_INFO; +} + +static enum gptp_received_info rcv_info(int port) +{ + /* TODO + * How can we define that a message does not convey the port + * role Master port ? + * It is needed to define that to be able to send + * GPTP_RCVD_INFO_OTHER_INFO. + */ + struct gptp_port_bmca_data *bmca_data; + struct gptp_announce *announce; + + bmca_data = GPTP_PORT_BMCA_DATA(port); + announce = GPTP_ANNOUNCE(bmca_data->rcvd_announce_ptr); + + bmca_data->message_steps_removed = announce->steps_removed; + + return compare_priority_vectors(&bmca_data->port_priority, + bmca_data->rcvd_announce_ptr, + port); +} + +static void record_other_announce_info(int port) +{ + struct gptp_hdr *hdr; + struct gptp_announce *announce; + struct gptp_port_bmca_data *bmca_data; + + bmca_data = GPTP_PORT_BMCA_DATA(port); + hdr = GPTP_HDR(bmca_data->rcvd_announce_ptr); + announce = GPTP_ANNOUNCE(bmca_data->rcvd_announce_ptr); + + /* Copy leap61, leap59, current UTC offset valid, time traceable and + * frequency traceable flags. 
+ */ + bmca_data->ann_flags.octets[1] = hdr->flags.octets[1]; + + bmca_data->ann_current_utc_offset = ntohs(announce->cur_utc_offset); + bmca_data->ann_time_source = announce->time_source; +} + +static void copy_priority_vector(struct gptp_priority_vector *vector, + struct net_pkt *pkt, int port) +{ + struct gptp_hdr *hdr; + struct gptp_announce *announce; + + hdr = GPTP_HDR(pkt); + announce = GPTP_ANNOUNCE(pkt); + + memcpy(&vector->root_system_id, &announce->root_system_id, + sizeof(struct gptp_root_system_identity) + sizeof(u16_t)); + + memcpy(&vector->src_port_id, &hdr->port_id, + sizeof(struct gptp_port_identity)); + + vector->port_number = htons(port); +} + +static void gptp_mi_port_announce_information_state_machine(int port) +{ + struct gptp_port_ds *port_ds; + struct gptp_global_ds *global_ds; + struct gptp_port_announce_information_state *state; + struct gptp_announce *announce; + struct gptp_hdr *hdr; + struct gptp_port_bmca_data *bmca_data; + struct gptp_pss_rcv_state *pss_rcv; + + bmca_data = GPTP_PORT_BMCA_DATA(port); + state = &GPTP_PORT_STATE(port)->pa_info; + port_ds = GPTP_PORT_DS(port); + global_ds = GPTP_GLOBAL_DS(); + + if ((!port_ds->ptt_port_enabled || !port_ds->as_capable) + && (bmca_data->info_is != + GPTP_INFO_IS_DISABLED)) { + state->state = GPTP_PA_INFO_DISABLED; + } + + switch (state->state) { + case GPTP_PA_INFO_DISABLED: + bmca_data->rcvd_msg = false; + bmca_data->info_is = GPTP_INFO_IS_DISABLED; + SET_RESELECT(global_ds, port); + CLEAR_SELECTED(global_ds, port); + state->state = GPTP_PA_INFO_POST_DISABLED; + k_timer_stop(&state->ann_rcpt_expiry_timer); + state->ann_expired = true; + /* Fallthrough. 
*/ + + case GPTP_PA_INFO_POST_DISABLED: + if (port_ds->ptt_port_enabled && port_ds->as_capable) { + state->state = GPTP_PA_INFO_AGED; + } else if (bmca_data->rcvd_msg) { + state->state = GPTP_PA_INFO_DISABLED; + } + + break; + + case GPTP_PA_INFO_AGED: + bmca_data->info_is = GPTP_INFO_IS_AGED; + CLEAR_SELECTED(global_ds, port); + SET_RESELECT(global_ds, port); + /* Transition will be actually tested in UPDATE state. */ + state->state = GPTP_PA_INFO_UPDATE; + break; + + case GPTP_PA_INFO_UPDATE: + if (IS_SELECTED(global_ds, port) && bmca_data->updt_info) { + memcpy(&bmca_data->port_priority, + &bmca_data->master_priority, + sizeof(struct gptp_priority_vector)); + bmca_data->port_steps_removed = + global_ds->master_steps_removed; + bmca_data->updt_info = false; + bmca_data->info_is = GPTP_INFO_IS_MINE; + bmca_data->new_info = true; + state->state = GPTP_PA_INFO_CURRENT; + } + + break; + + case GPTP_PA_INFO_CURRENT: + pss_rcv = &GPTP_PORT_STATE(port)->pss_rcv; + if (IS_SELECTED(global_ds, port) && bmca_data->updt_info) { + state->state = GPTP_PA_INFO_UPDATE; + } else if (bmca_data->rcvd_msg && !bmca_data->updt_info) { + state->state = GPTP_PA_INFO_RECEIVE; + } else if ((bmca_data->info_is == GPTP_INFO_IS_RECEIVED) + && !bmca_data->updt_info + && !bmca_data->rcvd_msg + && (state->ann_expired + || (global_ds->gm_present + && pss_rcv->sync_receipt_timeout_timer_expired) + )) { + state->state = GPTP_PA_INFO_AGED; + } + + break; + + case GPTP_PA_INFO_RECEIVE: + switch (rcv_info(port)) { + case GPTP_RCVD_INFO_SUPERIOR_MASTER_INFO: + state->state = GPTP_PA_INFO_SUPERIOR_MASTER_PORT; + break; + case GPTP_RCVD_INFO_REPEATED_MASTER_INFO: + state->state = GPTP_PA_INFO_REPEATED_MASTER_PORT; + break; + case GPTP_RCVD_INFO_INFERIOR_MASTER_INFO: + /* Fallthrough. 
*/ + case GPTP_RCVD_INFO_OTHER_INFO: + state->state = + GPTP_PA_INFO_INFERIOR_MASTER_OR_OTHER_PORT; + break; + } + + break; + + case GPTP_PA_INFO_SUPERIOR_MASTER_PORT: + /* We copy directly the content of the message to the port + * priority vector without using an intermediate messagePriority + * structure. + */ + + if (bmca_data->rcvd_announce_ptr == NULL) { + /* Shouldn't be reached. Checked for safety reason. */ + bmca_data->rcvd_msg = false; + state->state = GPTP_PA_INFO_CURRENT; + break; + } + + copy_priority_vector(&bmca_data->port_priority, + bmca_data->rcvd_announce_ptr, port); + + announce = GPTP_ANNOUNCE(bmca_data->rcvd_announce_ptr); + bmca_data->port_steps_removed = ntohs(announce->steps_removed); + record_other_announce_info(port); + hdr = GPTP_HDR(bmca_data->rcvd_announce_ptr); + gptp_set_time_itv(&bmca_data->ann_rcpt_timeout_time_interval, + port_ds->announce_receipt_timeout, + hdr->log_msg_interval); + bmca_data->info_is = GPTP_INFO_IS_RECEIVED; + CLEAR_SELECTED(global_ds, port); + SET_RESELECT(global_ds, port); + /* Fallthrough. */ + + case GPTP_PA_INFO_REPEATED_MASTER_PORT: + k_timer_stop(&state->ann_rcpt_expiry_timer); + state->ann_expired = false; + k_timer_start(&state->ann_rcpt_expiry_timer, + gptp_uscaled_ns_to_timer_ms( + &bmca_data->ann_rcpt_timeout_time_interval), + 0); + /* Fallthrough. */ + + case GPTP_PA_INFO_INFERIOR_MASTER_OR_OTHER_PORT: + if (bmca_data->rcvd_announce_ptr != NULL) { + net_pkt_unref(bmca_data->rcvd_announce_ptr); + bmca_data->rcvd_announce_ptr = NULL; + } + bmca_data->rcvd_msg = false; + state->state = GPTP_PA_INFO_CURRENT; + break; + } +} + +static void gptp_updt_role_disabled_tree(void) +{ + struct gptp_global_ds *global_ds; + int port; + + global_ds = GPTP_GLOBAL_DS(); + + /* Set all elements of the selectedRole array to DisabledPort. */ + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + global_ds->selected_role[port] = GPTP_PORT_DISABLED; + } + + /* Set lastGmPriority to all ones. 
*/ + memset(&global_ds->last_gm_priority, 0xFF, + sizeof(struct gptp_priority_vector)); + + /* Set pathTrace array to contain the single element thisClock. */ + global_ds->path_trace.len = htons(GPTP_CLOCK_ID_LEN); + memcpy(global_ds->path_trace.path_sequence, GPTP_DEFAULT_DS()->clk_id, + GPTP_CLOCK_ID_LEN); + +} + +static void gptp_clear_reselect_tree(void) +{ + /* Set all the elements of the reselect array to FALSE. */ + GPTP_GLOBAL_DS()->reselect_array = 0; +} + +static int compute_best_vector(void) +{ + struct gptp_priority_vector *gm_prio; + struct gptp_default_ds *default_ds; + struct gptp_global_ds *global_ds; + struct gptp_priority_vector *best_vector, *challenger; + int best_port, port, tmp; + struct gptp_pss_rcv_state *pss_rcv; + struct gptp_port_announce_information_state *pa_info_state; + + default_ds = GPTP_DEFAULT_DS(); + global_ds = GPTP_GLOBAL_DS(); + best_port = 0; + gm_prio = &global_ds->gm_priority; + + /* Write systemPriority into grandmaster. */ + memset(gm_prio, 0, sizeof(struct gptp_priority_vector)); + gm_prio->root_system_id.grand_master_prio1 = default_ds->priority1; + gm_prio->root_system_id.grand_master_prio2 = default_ds->priority2; + gm_prio->root_system_id.clk_quality.clock_class = + default_ds->clk_quality.clock_class; + gm_prio->root_system_id.clk_quality.clock_accuracy = + default_ds->clk_quality.clock_accuracy; + gm_prio->root_system_id.clk_quality.offset_scaled_log_var = + htons(default_ds->clk_quality.offset_scaled_log_var); + memcpy(gm_prio->src_port_id.clk_id, default_ds->clk_id, + GPTP_CLOCK_ID_LEN); + memcpy(gm_prio->root_system_id.grand_master_id, default_ds->clk_id, + GPTP_CLOCK_ID_LEN); + + best_vector = gm_prio; + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + challenger = &GPTP_PORT_BMCA_DATA(port)->port_priority; + pa_info_state = &GPTP_PORT_STATE(port)->pa_info; + pss_rcv = &GPTP_PORT_STATE(port)->pss_rcv; + + if (pa_info_state->ann_expired || + (global_ds->gm_present && + 
pss_rcv->sync_receipt_timeout_timer_expired)) { + continue; + } + + if (memcmp(challenger->src_port_id.clk_id, default_ds->clk_id, + GPTP_CLOCK_ID_LEN) == 0) { + /* Discard this challenger. */ + continue; + } + + if (best_port == 0) { + tmp = memcmp(&challenger->root_system_id, + &best_vector->root_system_id, + sizeof(struct gptp_root_system_identity)); + if (tmp < 0) { + best_vector = challenger; + best_port = port; + } else if (tmp > 0) { + continue; + } + + tmp = (int)challenger->steps_removed - + ((int)ntohs(best_vector->steps_removed) + 1); + if (tmp < 0) { + best_vector = challenger; + best_port = port; + } else if (tmp > 0) { + continue; + } + tmp = memcmp(&challenger->src_port_id, + &best_vector->src_port_id, + sizeof(struct gptp_port_identity)); + if (tmp < 0) { + best_vector = challenger; + best_port = port; + } else if (tmp > 0) { + continue; + } + + if (ntohs(challenger->port_number) < + ntohs(best_vector->port_number)) { + best_vector = challenger; + best_port = port; + } + + } else { + /* We can compare portPriority vectors without + * calculating pathPriority vectors. 
+ */ + if (memcmp(challenger, + best_vector, + sizeof(struct gptp_priority_vector)) < 0) { + best_vector = challenger; + best_port = port; + } + } + } + + if (best_port != 0) { + memcpy(&global_ds->gm_priority.root_system_id, + &best_vector->root_system_id, + sizeof(struct gptp_root_system_identity)); + + global_ds->gm_priority.steps_removed = + htons(ntohs(best_vector->steps_removed) + 1); + + memcpy(&global_ds->gm_priority.src_port_id, + &best_vector->src_port_id, + sizeof(struct gptp_port_identity)); + + global_ds->gm_priority.port_number = best_vector->port_number; + } + + return best_port; +} + +static void gptp_updt_roles_tree(void) +{ + struct gptp_global_ds *global_ds; + struct gptp_default_ds *default_ds; + struct gptp_priority_vector *gm_prio, *last_gm_prio; + struct gptp_port_bmca_data *bmca_data; + int port, best_port; + + global_ds = GPTP_GLOBAL_DS(); + default_ds = GPTP_DEFAULT_DS(); + + gm_prio = &global_ds->gm_priority; + last_gm_prio = &global_ds->last_gm_priority; + + /* Save gmPriority. */ + memcpy(last_gm_prio, gm_prio, sizeof(struct gptp_priority_vector)); + + best_port = compute_best_vector(); + + /* If the best vector was the systemPriorityVector. */ + if (best_port == 0) { + /* Copy leap61, leap59, current UTC offset valid, + * time traceable and frequency traceable flags. + */ + global_ds->global_flags.octets[1] = + global_ds->sys_flags.octets[1]; + + global_ds->current_utc_offset = + global_ds->sys_current_utc_offset; + + global_ds->time_source = global_ds->sys_time_source; + + global_ds->master_steps_removed = 0; + } else { + bmca_data = GPTP_PORT_BMCA_DATA(best_port); + + /* Copy leap61, leap59, current UTC offset valid, + * time traceable and frequency traceable flags. 
+ */ + global_ds->global_flags.octets[1] = + bmca_data->ann_flags.octets[1]; + + global_ds->current_utc_offset = + global_ds->sys_current_utc_offset; + + global_ds->time_source = bmca_data->ann_time_source; + + global_ds->master_steps_removed = + htons(ntohs(bmca_data->message_steps_removed) + 1); + } + + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + bmca_data = GPTP_PORT_BMCA_DATA(port); + + /* Update masterPriorityVector for the port. */ + if (best_port == 0) { + memcpy(&bmca_data->master_priority, gm_prio, + sizeof(struct gptp_priority_vector)); + bmca_data->master_priority.port_number = htons(port); + bmca_data->master_priority.src_port_id.port_number = + htons(port); + } else { + memcpy(&bmca_data->master_priority.root_system_id, + &gm_prio->root_system_id, + sizeof(struct gptp_root_system_identity)); + memcpy(bmca_data->master_priority.src_port_id.clk_id, + default_ds->clk_id, GPTP_CLOCK_ID_LEN); + bmca_data->master_priority.port_number = htons(port); + bmca_data->master_priority.src_port_id.port_number = + htons(port); + } + + switch (bmca_data->info_is) { + case GPTP_INFO_IS_DISABLED: + global_ds->selected_role[port] = GPTP_PORT_DISABLED; + break; + + case GPTP_INFO_IS_AGED: + bmca_data->updt_info = true; + global_ds->selected_role[port] = GPTP_PORT_MASTER; + break; + + case GPTP_INFO_IS_MINE: + global_ds->selected_role[port] = GPTP_PORT_MASTER; + if ((memcmp(&bmca_data->port_priority, + &bmca_data->master_priority, + sizeof(struct gptp_priority_vector)) != 0) + || (bmca_data->port_steps_removed != + global_ds->master_steps_removed)) { + bmca_data->updt_info = true; + } + + break; + + case GPTP_INFO_IS_RECEIVED: + if (best_port == port) { + /* gmPriorityVector is now derived from + * portPriorityVector. 
+ */ + global_ds->selected_role[port] = + GPTP_PORT_SLAVE; + bmca_data->updt_info = false; + } else if (memcmp(&bmca_data->port_priority, + &bmca_data->master_priority, + sizeof(struct gptp_priority_vector)) + <= 0) { + /* The masterPriorityVector is not better than + * the portPriorityVector. + */ + global_ds->selected_role[port] = + GPTP_PORT_PASSIVE; + if (memcmp(bmca_data->port_priority. + src_port_id.clk_id, + default_ds->clk_id, + GPTP_CLOCK_ID_LEN)) { + /* The sourcePortIdentity component of + * the portPriorityVector does not + * reflect another port on the + * time-aware system. + */ + bmca_data->updt_info = true; + } else { + bmca_data->updt_info = false; + } + } else { + global_ds->selected_role[port] = + GPTP_PORT_MASTER; + bmca_data->updt_info = true; + } + + break; + } + } + + /* Update gmPresent. */ + global_ds->gm_present = + (gm_prio->root_system_id.grand_master_prio1 == 255) ? + false : true; + + /* Assign the port role for port 0. */ + for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) { + if (global_ds->selected_role[port] == GPTP_PORT_SLAVE) { + global_ds->selected_role[0] = GPTP_PORT_PASSIVE; + break; + } + } + + if (port == GPTP_PORT_END) { + global_ds->selected_role[0] = GPTP_PORT_SLAVE; + } + + /* If current system is the Grand Master, set pathTrace array. */ + if (memcmp(default_ds->clk_id, gm_prio->root_system_id.grand_master_id, + GPTP_CLOCK_ID_LEN) == 0) { + global_ds->path_trace.len = htons(GPTP_CLOCK_ID_LEN); + memcpy(global_ds->path_trace.path_sequence, + default_ds->clk_id, GPTP_CLOCK_ID_LEN); + } +} + +static void gptp_set_selected_tree(void) +{ + /* Set all the elements of the selected array to TRUE. 
*/ + GPTP_GLOBAL_DS()->selected_array = ~0; +} + +static void gptp_mi_port_role_selection_state_machine(void) +{ + struct gptp_port_role_selection_state *state; + + state = &GPTP_STATE()->pr_sel; + + switch (state->state) { + case GPTP_PR_SELECTION_INIT_BRIDGE: + gptp_updt_role_disabled_tree(); + state->state = GPTP_PR_SELECTION_ROLE_SELECTION; + + /* Be sure to enter the "if" statement immediately after. */ + GPTP_GLOBAL_DS()->reselect_array = ~0; + /* Fallthrough. */ + + case GPTP_PR_SELECTION_ROLE_SELECTION: + if (GPTP_GLOBAL_DS()->reselect_array != 0) { + gptp_clear_reselect_tree(); + gptp_updt_roles_tree(); + gptp_set_selected_tree(); + } + + break; + } +} + +static void tx_announce(int port) +{ + struct net_pkt *pkt; + + pkt = gptp_prepare_announce(port); + if (pkt) { + gptp_send_announce(port, pkt); + } +} + +static void gptp_mi_port_announce_transmit_state_machine(int port) +{ + struct gptp_port_ds *port_ds; + struct gptp_global_ds *global_ds; + struct gptp_port_announce_transmit_state *state; + struct gptp_port_bmca_data *bmca_data; + + port_ds = GPTP_PORT_DS(port); + global_ds = GPTP_GLOBAL_DS(); + bmca_data = GPTP_PORT_BMCA_DATA(port); + state = &GPTP_PORT_STATE(port)->pa_transmit; + + /* Reset interval as defined in AnnounceIntervalSetting + * state machine. + */ + if (port_ds->ptt_port_enabled && !port_ds->prev_ptt_port_enabled) { + gptp_update_announce_interval(port, GPTP_ITV_SET_TO_INIT); + } + + switch (state->state) { + case GPTP_PA_TRANSMIT_INIT: + bmca_data->new_info = true; + /* Fallthrough. */ + + case GPTP_PA_TRANSMIT_IDLE: + k_timer_stop(&state->ann_send_periodic_timer); + state->ann_trigger = false; + k_timer_start(&state->ann_send_periodic_timer, + gptp_uscaled_ns_to_timer_ms( + &bmca_data->announce_interval), + 0); + + state->state = GPTP_PA_TRANSMIT_POST_IDLE; + /* Fallthrough. 
*/ + + case GPTP_PA_TRANSMIT_POST_IDLE: + if (IS_SELECTED(global_ds, port) + && !bmca_data->updt_info + && state->ann_trigger) { + + state->state = GPTP_PA_TRANSMIT_PERIODIC; + + } else if (IS_SELECTED(global_ds, port) + && !bmca_data->updt_info + && !state->ann_trigger + && (global_ds->selected_role[port] == + GPTP_PORT_MASTER) + && bmca_data->new_info) { + bmca_data->new_info = false; + tx_announce(port); + state->state = GPTP_PA_TRANSMIT_IDLE; + } + + break; + + case GPTP_PA_TRANSMIT_PERIODIC: + if (global_ds->selected_role[port] == GPTP_PORT_MASTER) { + bmca_data->new_info = true; + } + state->state = GPTP_PA_TRANSMIT_IDLE; + break; + } +} + + +void gptp_mi_port_sync_state_machines(int port) +{ + gptp_mi_pss_rcv_state_machine(port); + gptp_mi_pss_send_state_machine(port); +} + +void gptp_mi_port_bmca_state_machines(int port) +{ + gptp_mi_port_announce_receive_state_machine(port); + gptp_mi_port_announce_information_state_machine(port); + gptp_mi_port_announce_transmit_state_machine(port); +} + +void gptp_mi_state_machines(void) +{ + gptp_mi_site_sync_sync_state_machine(); + gptp_mi_clk_slave_sync_state_machine(); + gptp_mi_port_role_selection_state_machine(); + gptp_mi_clk_master_sync_rcv_state_machine(); +} diff --git a/subsys/net/ip/l2/gptp/gptp_private.h b/subsys/net/ip/l2/gptp/gptp_private.h new file mode 100644 index 0000000000000..64133252bdff4 --- /dev/null +++ b/subsys/net/ip/l2/gptp/gptp_private.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief Private functions for the Precision Time Protocol Stack. + * + * This is not to be included by the application. + */ + +#ifndef __GPTP_PRIVATE_H +#define __GPTP_PRIVATE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(CONFIG_NET_GPTP) + +#include + +/* Common defines for the gPTP stack. 
 */
+#define GPTP_THREAD_WAIT_TIMEOUT_MS 1
+#define GPTP_MULTIPLE_PDELAY_RESP_WAIT K_MINUTES(5)
+
+#define USCALED_NS_TO_MS(val) ((val >> 16) / 1000000)
+
+#if defined(CONFIG_NET_GPTP_STATISTICS)
+#define GPTP_STATS_INC(port, var) (GPTP_PORT_PARAM_DS(port)->var++)
+#else
+#define GPTP_STATS_INC(port, var)
+#endif
+
+/**
+ * @brief Check if a port is acting as a slave.
+ *
+ * Utility to check if a port is configured as a slave.
+ *
+ * @param port Port to check.
+ *
+ * @return True if this is a slave port.
+ */
+bool gptp_is_slave_port(int port);
+
+/**
+ * @brief Convert the network interface to the correct port number.
+ *
+ * @param iface Network Interface acting as a ptp port.
+ *
+ * @return Number of the port if found, -ENODEV otherwise.
+ */
+int gptp_get_port_number(struct net_if *iface);
+
+/**
+ * @brief Calculate a logInterval and store in Uscaled ns structure.
+ *
+ * @param interval Result of calculation.
+ *
+ * @param seconds Seconds of interval.
+ *
+ * @param log_msg_interval Logarithm 2 to apply to this interval.
+ */
+void gptp_set_time_itv(struct gptp_uscaled_ns *interval,
+		       u16_t seconds,
+		       s8_t log_msg_interval);
+
+/**
+ * @brief Convert uscaled ns to ms for timer use.
+ *
+ * @param usns Pointer to uscaled nanoseconds to convert.
+ *
+ * @return INT32_MAX if the value exceeds the timer max value, 0 if the result
+ * of the conversion is less than 1 ms, the converted value otherwise.
+ */
+s32_t gptp_uscaled_ns_to_timer_ms(struct gptp_uscaled_ns *usns);
+
+/**
+ * @brief Update pDelay request interval and its timer.
+ *
+ * @param port Port number.
+ *
+ * @param log_val New logarithm 2 to apply to this interval.
+ */
+void gptp_update_pdelay_req_interval(int port, s8_t log_val);
+
+/**
+ * @brief Update sync interval and its timer.
+ *
+ * @param port Port number.
+ *
+ * @param log_val New logarithm 2 to apply to this interval.
+ */
+void gptp_update_sync_interval(int port, s8_t log_val);
+
+/**
+ * @brief Update announce interval and its timer.
+ * + * @param port Port number. + * + * @param log_val New logarithm 2 to apply to this interval. + */ + +void gptp_update_announce_interval(int port, s8_t log_val); + +/** + * @brief Convert a ptp timestamp to nanoseconds. + * + * @param ts A PTP timestamp. + * + * @return Number of nanoseconds. + */ +static inline u64_t gptp_timestamp_to_nsec(struct net_ptp_time *ts) +{ + return (ts->second * NSEC_PER_SEC) + ts->nanosecond; +} + +#endif /* CONFIG_NET_GPTP */ + +#ifdef __cplusplus +} +#endif + +#endif /* __GPTP_PRIVATE_H */ diff --git a/subsys/net/ip/net_context.c b/subsys/net/ip/net_context.c index 91d9344fb3990..307b4f9d0a0ff 100644 --- a/subsys/net/ip/net_context.c +++ b/subsys/net/ip/net_context.c @@ -1180,11 +1180,11 @@ NET_CONN_CB(tcp_established) if (tcp_flags & NET_TCP_RST) { /* We only accept RST packet that has valid seq field. */ if (!net_tcp_validate_seq(context->tcp, pkt)) { - net_stats_update_tcp_seg_rsterr(); + net_stats_update_tcp_seg_rsterr(net_pkt_iface(pkt)); return NET_DROP; } - net_stats_update_tcp_seg_rst(); + net_stats_update_tcp_seg_rst(net_pkt_iface(pkt)); net_tcp_print_recv_info("RST", pkt, tcp_hdr->src_port); @@ -1320,11 +1320,11 @@ NET_CONN_CB(tcp_synack_received) if (NET_TCP_FLAGS(tcp_hdr) & NET_TCP_RST) { /* We only accept RST packet that has valid seq field. 
*/ if (!net_tcp_validate_seq(context->tcp, pkt)) { - net_stats_update_tcp_seg_rsterr(); + net_stats_update_tcp_seg_rsterr(net_pkt_iface(pkt)); return NET_DROP; } - net_stats_update_tcp_seg_rst(); + net_stats_update_tcp_seg_rst(net_pkt_iface(pkt)); if (context->connect_cb) { context->connect_cb(context, -ECONNREFUSED, @@ -1739,11 +1739,11 @@ NET_CONN_CB(tcp_syn_rcvd) if (NET_TCP_FLAGS(tcp_hdr) == NET_TCP_RST) { if (tcp_backlog_rst(pkt) < 0) { - net_stats_update_tcp_seg_rsterr(); + net_stats_update_tcp_seg_rsterr(net_pkt_iface(pkt)); return NET_DROP; } - net_stats_update_tcp_seg_rst(); + net_stats_update_tcp_seg_rst(net_pkt_iface(pkt)); net_tcp_print_recv_info("RST", pkt, tcp_hdr->src_port); @@ -1852,7 +1852,7 @@ NET_CONN_CB(tcp_syn_rcvd) return NET_DROP; conndrop: - net_stats_update_tcp_seg_conndrop(); + net_stats_update_tcp_seg_conndrop(net_pkt_iface(pkt)); reset: send_reset(tcp->context, &local_addr, &remote_addr); @@ -2292,7 +2292,7 @@ static enum net_verdict packet_received(struct net_conn *conn, net_pkt_appdata(pkt), net_pkt_appdatalen(pkt), net_pkt_get_len(pkt)); - net_stats_update_tcp_recv(net_pkt_appdatalen(pkt)); + net_stats_update_tcp_recv(net_pkt_iface(pkt), net_pkt_appdatalen(pkt)); context->recv_cb(context, pkt, 0, user_data); diff --git a/subsys/net/ip/net_core.c b/subsys/net/ip/net_core.c index e05e8b5682e4d..02d97c9ff257f 100644 --- a/subsys/net/ip/net_core.c +++ b/subsys/net/ip/net_core.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "net_private.h" #include "net_shell.h" @@ -70,7 +71,7 @@ static inline enum net_verdict process_data(struct net_pkt *pkt, /* If there is no data, then drop the packet. 
*/ if (!pkt->frags) { NET_DBG("Corrupted packet (frags %p)", pkt->frags); - net_stats_update_processing_error(); + net_stats_update_processing_error(net_pkt_iface(pkt)); return NET_DROP; } @@ -80,7 +81,8 @@ static inline enum net_verdict process_data(struct net_pkt *pkt, if (ret != NET_CONTINUE) { if (ret == NET_DROP) { NET_DBG("Packet %p discarded by L2", pkt); - net_stats_update_processing_error(); + net_stats_update_processing_error( + net_pkt_iface(pkt)); } return ret; @@ -91,13 +93,13 @@ static inline enum net_verdict process_data(struct net_pkt *pkt, switch (NET_IPV6_HDR(pkt)->vtc & 0xf0) { #if defined(CONFIG_NET_IPV6) case 0x60: - net_stats_update_ipv6_recv(); + net_stats_update_ipv6_recv(net_pkt_iface(pkt)); net_pkt_set_family(pkt, PF_INET6); return net_ipv6_process_pkt(pkt); #endif #if defined(CONFIG_NET_IPV4) case 0x40: - net_stats_update_ipv4_recv(); + net_stats_update_ipv4_recv(net_pkt_iface(pkt)); net_pkt_set_family(pkt, PF_INET); return net_ipv4_process_pkt(pkt); #endif @@ -105,8 +107,8 @@ static inline enum net_verdict process_data(struct net_pkt *pkt, NET_DBG("Unknown IP family packet (0x%x)", NET_IPV6_HDR(pkt)->vtc & 0xf0); - net_stats_update_ip_errors_protoerr(); - net_stats_update_ip_errors_vhlerr(); + net_stats_update_ip_errors_protoerr(net_pkt_iface(pkt)); + net_stats_update_ip_errors_vhlerr(net_pkt_iface(pkt)); return NET_DROP; } @@ -128,6 +130,9 @@ static void processing_data(struct net_pkt *pkt, bool is_loopback) /* Things to setup after we are able to RX and TX */ static void net_post_init(void) { +#if defined(CONFIG_NET_GPTP) + net_gptp_init(); +#endif } static void init_rx_queues(void) @@ -254,10 +259,10 @@ int net_send_data(struct net_pkt *pkt) #if defined(CONFIG_NET_STATISTICS) switch (net_pkt_family(pkt)) { case AF_INET: - net_stats_update_ipv4_sent(); + net_stats_update_ipv4_sent(net_pkt_iface(pkt)); break; case AF_INET6: - net_stats_update_ipv6_sent(); + net_stats_update_ipv6_sent(net_pkt_iface(pkt)); break; } #endif @@ -294,7 +299,7 
@@ static void net_rx(struct net_if *iface, struct net_pkt *pkt) NET_DBG("Received pkt %p len %zu", pkt, pkt_len); - net_stats_update_bytes_recv(pkt_len); + net_stats_update_bytes_recv(iface, pkt_len); processing_data(pkt, false); @@ -321,23 +326,26 @@ static void net_queue_rx(struct net_if *iface, struct net_pkt *pkt) #if defined(CONFIG_NET_STATISTICS) pkt->total_pkt_len = net_pkt_get_len(pkt); - net_stats_update_tc_recv_pkt(tc); - net_stats_update_tc_recv_bytes(tc, pkt->total_pkt_len); - net_stats_update_tc_recv_priority(tc, prio); + net_stats_update_tc_recv_pkt(iface, tc); + net_stats_update_tc_recv_bytes(iface, tc, pkt->total_pkt_len); + net_stats_update_tc_recv_priority(iface, tc, prio); #endif #if NET_TC_RX_COUNT > 1 NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt); #endif + net_pkt_set_traffic_class(pkt, tc); + net_tc_submit_to_rx_queue(tc, pkt); } /* Called by driver when an IP packet has been received */ int net_recv_data(struct net_if *iface, struct net_pkt *pkt) { - NET_ASSERT(pkt && pkt->frags); - NET_ASSERT(iface); + if (!pkt || !iface) { + return -EINVAL; + } if (!pkt->frags) { return -ENODATA; diff --git a/subsys/net/ip/net_if.c b/subsys/net/ip/net_if.c index 6a3d88bafb049..70e44acf372f4 100644 --- a/subsys/net/ip/net_if.c +++ b/subsys/net/ip/net_if.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -69,6 +70,24 @@ static sys_slist_t link_callbacks; static sys_slist_t mcast_monitor_callbacks; #endif +#if defined(CONFIG_NET_PKT_TIMESTAMP) +#if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE) +#define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024 +#endif + +NET_STACK_DEFINE(TIMESTAMP, tx_ts_stack, + CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE, + CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE); +K_FIFO_DEFINE(tx_ts_queue); + +static struct k_thread tx_thread_ts; + +/* We keep track of the timestamp callbacks in this list. 
+ */ +static sys_slist_t timestamp_callbacks; +static int timestamp_enabled; +#endif /* CONFIG_NET_PKT_TIMESTAMP */ + #if defined(CONFIG_NET_DEBUG_IF) #if defined(CONFIG_NET_STATISTICS) #define debug_check_packet(pkt) \ @@ -106,12 +125,12 @@ static inline void net_context_send_cb(struct net_context *context, #if defined(CONFIG_NET_UDP) if (net_context_get_ip_proto(context) == IPPROTO_UDP) { - net_stats_update_udp_sent(); + net_stats_update_udp_sent(net_context_get_iface(context)); } else #endif #if defined(CONFIG_NET_TCP) if (net_context_get_ip_proto(context) == IPPROTO_TCP) { - net_stats_update_tcp_seg_sent(); + net_stats_update_tcp_seg_sent(net_context_get_iface(context)); } else #endif { @@ -156,7 +175,7 @@ static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt) net_pkt_unref(pkt); } else { - net_stats_update_bytes_sent(pkt->total_pkt_len); + net_stats_update_bytes_sent(iface, pkt->total_pkt_len); } if (context) { @@ -192,15 +211,17 @@ void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt) #if defined(CONFIG_NET_STATISTICS) pkt->total_pkt_len = net_pkt_get_len(pkt); - net_stats_update_tc_sent_pkt(tc); - net_stats_update_tc_sent_bytes(tc, pkt->total_pkt_len); - net_stats_update_tc_sent_priority(tc, prio); + net_stats_update_tc_sent_pkt(iface, tc); + net_stats_update_tc_sent_bytes(iface, tc, pkt->total_pkt_len); + net_stats_update_tc_sent_priority(iface, tc, prio); #endif #if NET_TC_TX_COUNT > 1 NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt); #endif + net_pkt_set_traffic_class(pkt, tc); + net_tc_submit_to_tx_queue(tc, pkt); } @@ -1734,6 +1755,22 @@ bool net_if_ipv4_addr_mask_cmp(struct net_if *iface, return false; } +struct net_if *net_if_ipv4_select_src_iface(struct in_addr *dst) +{ + struct net_if *iface; + + for (iface = __net_if_start; iface != __net_if_end; iface++) { + bool ret; + + ret = net_if_ipv4_addr_mask_cmp(iface, dst); + if (ret) { + return iface; + } + } + + return net_if_get_default(); +} + struct net_if_addr 
*net_if_ipv4_addr_lookup(const struct in_addr *addr, struct net_if **ret) { @@ -2138,6 +2175,96 @@ int net_if_down(struct net_if *iface) return 0; } +#if defined(CONFIG_NET_PKT_TIMESTAMP) +#if defined(CONFIG_NET_STATISTICS) +void net_if_update_rx_timestamp_stats(struct net_pkt *pkt) +{ + u32_t now = k_cycle_get_32(); + s32_t diff = (s32_t)(now - pkt->cycles_update); + + diff = abs(diff); + + net_stats_update_pkt_rx_timestamp(net_pkt_traffic_class(pkt), + SYS_CLOCK_HW_CYCLES_TO_NS64(diff)); +} + +void net_if_update_tx_timestamp_stats(struct net_pkt *pkt) +{ + s32_t diff = (s32_t)(pkt->cycles_update - pkt->cycles_create); + + diff = abs(diff); + + net_stats_update_pkt_tx_timestamp(net_pkt_traffic_class(pkt), + SYS_CLOCK_HW_CYCLES_TO_NS64(diff)); +} +#endif /* CONFIG_NET_STATISTICS */ + +static void net_tx_ts_thread(void) +{ + struct net_pkt *pkt; + + NET_DBG("Starting TX timestamp callback thread"); + + while (1) { + pkt = k_fifo_get(&tx_ts_queue, K_FOREVER); + if (pkt) { + net_if_update_tx_timestamp_stats(pkt); + + net_if_call_timestamp_cb(pkt); + } + + k_yield(); + } +} + +void net_if_register_timestamp_cb(struct net_if_timestamp_cb *timestamp, + struct net_if *iface, + net_if_timestamp_callback_t cb) +{ + sys_slist_find_and_remove(×tamp_callbacks, ×tamp->node); + sys_slist_prepend(×tamp_callbacks, ×tamp->node); + + timestamp->iface = iface; + timestamp->cb = cb; + timestamp_enabled++; +} + +void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *timestamp) +{ + sys_slist_find_and_remove(×tamp_callbacks, ×tamp->node); + + timestamp_enabled--; + if (timestamp_enabled <= 0) { + timestamp_enabled = 0; + } +} + +void net_if_call_timestamp_cb(struct net_pkt *pkt) +{ + sys_snode_t *sn, *sns; + + SYS_SLIST_FOR_EACH_NODE_SAFE(×tamp_callbacks, sn, sns) { + struct net_if_timestamp_cb *timestamp = + CONTAINER_OF(sn, struct net_if_timestamp_cb, node); + + if ((timestamp->iface == NULL) || + (timestamp->iface == net_pkt_iface(pkt))) { + timestamp->cb(pkt); + } + } +} + 
+void net_if_add_tx_timestamp(struct net_pkt *pkt) +{ + /* No need to proceed if there are no one interested in this info */ + if (timestamp_enabled == 0) { + return; + } + + k_fifo_put(&tx_ts_queue, pkt); +} +#endif /* CONFIG_NET_PKT_TIMESTAMP */ + void net_if_init(void) { struct net_if *iface; @@ -2192,6 +2319,31 @@ void net_if_init(void) #endif } #endif /* CONFIG_NET_IPV6 */ + +#if defined(CONFIG_NET_PKT_TIMESTAMP) + k_thread_create(&tx_thread_ts, tx_ts_stack, + K_THREAD_STACK_SIZEOF(tx_ts_stack), + (k_thread_entry_t)net_tx_ts_thread, + NULL, NULL, NULL, K_PRIO_PREEMPT(1), 0, 0); +#endif /* CONFIG_NET_PKT_TIMESTAMP */ + +#if defined(CONFIG_NET_VLAN) + /* Make sure that we do not have too many network interfaces + * compared to the number of VLAN interfaces. + */ + for (iface = __net_if_start, if_count = 0; + iface != __net_if_end; iface++) { + if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) { + if_count++; + } + } + + if (if_count > CONFIG_NET_VLAN_COUNT) { + NET_WARN("You have configured only %d VLAN interfaces" + " but you have %d network interfaces.", + CONFIG_NET_VLAN_COUNT, if_count); + } +#endif } void net_if_post_init(void) diff --git a/subsys/net/ip/net_pkt.c b/subsys/net/ip/net_pkt.c index 6a62120d4004c..5a6122a1a53dd 100644 --- a/subsys/net/ip/net_pkt.c +++ b/subsys/net/ip/net_pkt.c @@ -322,6 +322,12 @@ struct net_pkt *net_pkt_get_reserve(struct k_mem_slab *slab, net_pkt_set_priority(pkt, CONFIG_NET_TX_DEFAULT_PRIORITY); #endif + net_pkt_set_vlan_tag(pkt, NET_VLAN_TAG_UNSPEC); + +#if defined(CONFIG_NET_PKT_TIMESTAMP) && defined(CONFIG_NET_STATISTICS) + pkt->cycles_create = k_cycle_get_32(); +#endif + #if defined(CONFIG_NET_DEBUG_NET_PKT) net_pkt_alloc_add(pkt, true, caller, line); @@ -2007,6 +2013,7 @@ struct net_pkt *net_pkt_clone(struct net_pkt *pkt, s32_t timeout) net_pkt_set_next_hdr(clone, NULL); net_pkt_set_ip_hdr_len(clone, net_pkt_ip_hdr_len(pkt)); + net_pkt_set_vlan_tag(clone, net_pkt_vlan_tag(pkt)); net_pkt_set_family(clone, 
net_pkt_family(pkt)); diff --git a/subsys/net/ip/net_private.h b/subsys/net/ip/net_private.h index 3aa1de6e2ce40..85d7c171c6009 100644 --- a/subsys/net/ip/net_private.h +++ b/subsys/net/ip/net_private.h @@ -28,6 +28,25 @@ extern void net_tc_rx_init(void); extern void net_tc_submit_to_tx_queue(u8_t tc, struct net_pkt *pkt); extern void net_tc_submit_to_rx_queue(u8_t tc, struct net_pkt *pkt); +#if defined(CONFIG_NET_GPTP) +/** + * @brief Initialize Precision Time Protocol Layer. + */ +void net_gptp_init(void); + +/** + * @brief Process a ptp message. + * + * @param buf Buffer with a valid PTP Ethernet type. + * + * @return Return the policy for network buffer. + */ +enum net_verdict net_gptp_recv(struct net_if *iface, struct net_pkt *pkt); +#else +#define net_gptp_init() +#define net_gptp_recv(iface, pkt) +#endif /* CONFIG_NET_GPTP */ + #if defined(CONFIG_NET_IPV6_FRAGMENT) int net_ipv6_send_fragmented_pkt(struct net_if *iface, struct net_pkt *pkt, u16_t pkt_len); diff --git a/subsys/net/ip/net_shell.c b/subsys/net/ip/net_shell.c index 1fe8f85d42995..b3b6dcfcfc309 100644 --- a/subsys/net/ip/net_shell.c +++ b/subsys/net/ip/net_shell.c @@ -47,6 +47,18 @@ #include #endif +#if defined(CONFIG_NET_VLAN) +#include +#endif + +#if defined(CONFIG_NET_GPTP) +#include +#include +#include +#include +#include +#endif + #include "net_shell.h" #include "net_stats.h" @@ -165,6 +177,9 @@ static void iface_cb(struct net_if *iface, void *user_data) #endif #if defined(CONFIG_NET_IPV4) struct net_if_ipv4 *ipv4; +#endif +#if defined(CONFIG_NET_VLAN) + struct ethernet_context *eth_ctx; #endif struct net_if_addr *unicast; struct net_if_mcast_addr *mcast; @@ -187,6 +202,28 @@ static void iface_cb(struct net_if *iface, void *user_data) net_if_get_link_addr(iface)->len)); printk("MTU : %d\n", net_if_get_mtu(iface)); +#if defined(CONFIG_NET_VLAN) + if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) { + eth_ctx = net_if_l2_data(iface); + + if (eth_ctx->vlan_enabled) { + for (i = 0; i < 
CONFIG_NET_VLAN_COUNT; i++) { + if (eth_ctx->vlan[i].iface != iface || + eth_ctx->vlan[i].tag == + NET_VLAN_TAG_UNSPEC) { + continue; + } + + printk("VLAN tag : %d (0x%x)\n", + eth_ctx->vlan[i].tag, + eth_ctx->vlan[i].tag); + } + } else { + printk("VLAN not enabled\n"); + } + } +#endif + #if defined(CONFIG_NET_IPV6) count = 0; @@ -472,120 +509,134 @@ static const char *priority2str(enum net_priority priority) } #endif -static inline void net_shell_print_statistics(void) +static void net_shell_print_statistics(struct net_if *iface, void *user_data) { + ARG_UNUSED(user_data); + + if (iface) { + const char *extra; + + printk("\nInterface %p (%s) [%d]\n", iface, + iface2str(iface, &extra), + net_if_get_by_iface(iface)); + printk("===========================%s\n", extra); + } else { + printk("\nGlobal statistics\n"); + printk("=================\n"); + } + #if defined(CONFIG_NET_IPV6) printk("IPv6 recv %d\tsent\t%d\tdrop\t%d\tforwarded\t%d\n", - GET_STAT(ipv6.recv), - GET_STAT(ipv6.sent), - GET_STAT(ipv6.drop), - GET_STAT(ipv6.forwarded)); + GET_STAT(iface, ipv6.recv), + GET_STAT(iface, ipv6.sent), + GET_STAT(iface, ipv6.drop), + GET_STAT(iface, ipv6.forwarded)); #if defined(CONFIG_NET_IPV6_ND) printk("IPv6 ND recv %d\tsent\t%d\tdrop\t%d\n", - GET_STAT(ipv6_nd.recv), - GET_STAT(ipv6_nd.sent), - GET_STAT(ipv6_nd.drop)); + GET_STAT(iface, ipv6_nd.recv), + GET_STAT(iface, ipv6_nd.sent), + GET_STAT(iface, ipv6_nd.drop)); #endif /* CONFIG_NET_IPV6_ND */ #if defined(CONFIG_NET_STATISTICS_MLD) printk("IPv6 MLD recv %d\tsent\t%d\tdrop\t%d\n", - GET_STAT(ipv6_mld.recv), - GET_STAT(ipv6_mld.sent), - GET_STAT(ipv6_mld.drop)); + GET_STAT(iface, ipv6_mld.recv), + GET_STAT(iface, ipv6_mld.sent), + GET_STAT(iface, ipv6_mld.drop)); #endif /* CONFIG_NET_STATISTICS_MLD */ #endif /* CONFIG_NET_IPV6 */ #if defined(CONFIG_NET_IPV4) printk("IPv4 recv %d\tsent\t%d\tdrop\t%d\tforwarded\t%d\n", - GET_STAT(ipv4.recv), - GET_STAT(ipv4.sent), - GET_STAT(ipv4.drop), - GET_STAT(ipv4.forwarded)); + 
GET_STAT(iface, ipv4.recv), + GET_STAT(iface, ipv4.sent), + GET_STAT(iface, ipv4.drop), + GET_STAT(iface, ipv4.forwarded)); #endif /* CONFIG_NET_IPV4 */ printk("IP vhlerr %d\thblener\t%d\tlblener\t%d\n", - GET_STAT(ip_errors.vhlerr), - GET_STAT(ip_errors.hblenerr), - GET_STAT(ip_errors.lblenerr)); + GET_STAT(iface, ip_errors.vhlerr), + GET_STAT(iface, ip_errors.hblenerr), + GET_STAT(iface, ip_errors.lblenerr)); printk("IP fragerr %d\tchkerr\t%d\tprotoer\t%d\n", - GET_STAT(ip_errors.fragerr), - GET_STAT(ip_errors.chkerr), - GET_STAT(ip_errors.protoerr)); + GET_STAT(iface, ip_errors.fragerr), + GET_STAT(iface, ip_errors.chkerr), + GET_STAT(iface, ip_errors.protoerr)); printk("ICMP recv %d\tsent\t%d\tdrop\t%d\n", - GET_STAT(icmp.recv), - GET_STAT(icmp.sent), - GET_STAT(icmp.drop)); + GET_STAT(iface, icmp.recv), + GET_STAT(iface, icmp.sent), + GET_STAT(iface, icmp.drop)); printk("ICMP typeer %d\tchkerr\t%d\n", - GET_STAT(icmp.typeerr), - GET_STAT(icmp.chkerr)); + GET_STAT(iface, icmp.typeerr), + GET_STAT(iface, icmp.chkerr)); #if defined(CONFIG_NET_UDP) printk("UDP recv %d\tsent\t%d\tdrop\t%d\n", - GET_STAT(udp.recv), - GET_STAT(udp.sent), - GET_STAT(udp.drop)); + GET_STAT(iface, udp.recv), + GET_STAT(iface, udp.sent), + GET_STAT(iface, udp.drop)); printk("UDP chkerr %d\n", - GET_STAT(udp.chkerr)); + GET_STAT(iface, udp.chkerr)); #endif #if defined(CONFIG_NET_STATISTICS_TCP) printk("TCP bytes recv %u\tsent\t%d\n", - GET_STAT(tcp.bytes.received), - GET_STAT(tcp.bytes.sent)); + GET_STAT(iface, tcp.bytes.received), + GET_STAT(iface, tcp.bytes.sent)); printk("TCP seg recv %d\tsent\t%d\tdrop\t%d\n", - GET_STAT(tcp.recv), - GET_STAT(tcp.sent), - GET_STAT(tcp.drop)); + GET_STAT(iface, tcp.recv), + GET_STAT(iface, tcp.sent), + GET_STAT(iface, tcp.drop)); printk("TCP seg resent %d\tchkerr\t%d\tackerr\t%d\n", - GET_STAT(tcp.resent), - GET_STAT(tcp.chkerr), - GET_STAT(tcp.ackerr)); + GET_STAT(iface, tcp.resent), + GET_STAT(iface, tcp.chkerr), + GET_STAT(iface, tcp.ackerr)); 
printk("TCP seg rsterr %d\trst\t%d\tre-xmit\t%d\n", - GET_STAT(tcp.rsterr), - GET_STAT(tcp.rst), - GET_STAT(tcp.rexmit)); + GET_STAT(iface, tcp.rsterr), + GET_STAT(iface, tcp.rst), + GET_STAT(iface, tcp.rexmit)); printk("TCP conn drop %d\tconnrst\t%d\n", - GET_STAT(tcp.conndrop), - GET_STAT(tcp.connrst)); + GET_STAT(iface, tcp.conndrop), + GET_STAT(iface, tcp.connrst)); #endif #if defined(CONFIG_NET_STATISTICS_RPL) printk("RPL DIS recv %d\tsent\t%d\tdrop\t%d\n", - GET_STAT(rpl.dis.recv), - GET_STAT(rpl.dis.sent), - GET_STAT(rpl.dis.drop)); + GET_STAT(iface, rpl.dis.recv), + GET_STAT(iface, rpl.dis.sent), + GET_STAT(iface, rpl.dis.drop)); printk("RPL DIO recv %d\tsent\t%d\tdrop\t%d\n", - GET_STAT(rpl.dio.recv), - GET_STAT(rpl.dio.sent), - GET_STAT(rpl.dio.drop)); + GET_STAT(iface, rpl.dio.recv), + GET_STAT(iface, rpl.dio.sent), + GET_STAT(iface, rpl.dio.drop)); printk("RPL DAO recv %d\tsent\t%d\tdrop\t%d\tforwarded\t%d\n", - GET_STAT(rpl.dao.recv), - GET_STAT(rpl.dao.sent), - GET_STAT(rpl.dao.drop), - GET_STAT(rpl.dao.forwarded)); + GET_STAT(iface, rpl.dao.recv), + GET_STAT(iface, rpl.dao.sent), + GET_STAT(iface, rpl.dao.drop), + GET_STAT(iface, rpl.dao.forwarded)); printk("RPL DAOACK rcv %d\tsent\t%d\tdrop\t%d\n", - GET_STAT(rpl.dao_ack.recv), - GET_STAT(rpl.dao_ack.sent), - GET_STAT(rpl.dao_ack.drop)); + GET_STAT(iface, rpl.dao_ack.recv), + GET_STAT(iface, rpl.dao_ack.sent), + GET_STAT(iface, rpl.dao_ack.drop)); printk("RPL overflows %d\tl-repairs\t%d\tg-repairs\t%d\n", - GET_STAT(rpl.mem_overflows), - GET_STAT(rpl.local_repairs), - GET_STAT(rpl.global_repairs)); + GET_STAT(iface, rpl.mem_overflows), + GET_STAT(iface, rpl.local_repairs), + GET_STAT(iface, rpl.global_repairs)); printk("RPL malformed %d\tresets \t%d\tp-switch\t%d\n", - GET_STAT(rpl.malformed_msgs), - GET_STAT(rpl.resets), - GET_STAT(rpl.parent_switch)); + GET_STAT(iface, rpl.malformed_msgs), + GET_STAT(iface, rpl.resets), + GET_STAT(iface, rpl.parent_switch)); printk("RPL f-errors 
%d\tl-errors\t%d\tl-warnings\t%d\n", - GET_STAT(rpl.forward_errors), - GET_STAT(rpl.loop_errors), - GET_STAT(rpl.loop_warnings)); + GET_STAT(iface, rpl.forward_errors), + GET_STAT(iface, rpl.loop_errors), + GET_STAT(iface, rpl.loop_warnings)); printk("RPL r-repairs %d\n", - GET_STAT(rpl.root_repairs)); + GET_STAT(iface, rpl.root_repairs)); #endif - printk("Bytes received %u\n", GET_STAT(bytes.received)); - printk("Bytes sent %u\n", GET_STAT(bytes.sent)); - printk("Processing err %d\n", GET_STAT(processing_error)); + printk("Bytes received %u\n", GET_STAT(iface, bytes.received)); + printk("Bytes sent %u\n", GET_STAT(iface, bytes.sent)); + printk("Processing err %d\n", GET_STAT(iface, processing_error)); #if NET_TC_COUNT > 1 { @@ -593,31 +644,77 @@ static inline void net_shell_print_statistics(void) #if NET_TC_TX_COUNT > 1 printk("TX traffic class statistics:\n"); - printk("TC Priority\tSent pkts\tbytes\n"); + printk("TC Priority\tSent pkts\tbytes\n"); for (i = 0; i < NET_TC_TX_COUNT; i++) { printk("[%d] %s (%d)\t%d\t\t%d\n", i, - priority2str(GET_STAT(tc.sent[i].priority)), - GET_STAT(tc.sent[i].priority), - GET_STAT(tc.sent[i].pkts), - GET_STAT(tc.sent[i].bytes)); + priority2str(GET_STAT(iface, + tc.sent[i].priority)), + GET_STAT(iface, tc.sent[i].priority), + GET_STAT(iface, tc.sent[i].pkts), + GET_STAT(iface, tc.sent[i].bytes)); } #endif #if NET_TC_RX_COUNT > 1 printk("RX traffic class statistics:\n"); - printk("TC Priority\tRecv pkts\tbytes\n"); + printk("TC Priority\tRecv pkts\tbytes\n"); for (i = 0; i < NET_TC_RX_COUNT; i++) { printk("[%d] %s (%d)\t%d\t\t%d\n", i, - priority2str(GET_STAT(tc.recv[i].priority)), - GET_STAT(tc.recv[i].priority), - GET_STAT(tc.recv[i].pkts), - GET_STAT(tc.recv[i].bytes)); + priority2str(GET_STAT(iface, + tc.recv[i].priority)), + GET_STAT(iface, tc.recv[i].priority), + GET_STAT(iface, tc.recv[i].pkts), + GET_STAT(iface, tc.recv[i].bytes)); } - } #endif + } #endif /* NET_TC_COUNT > 1 */ + +#if (NET_TC_COUNT > 1) && 
defined(CONFIG_NET_PKT_TIMESTAMP) + { + int i; + +#if NET_TC_TX_COUNT > 1 + printk("TX timestamp statistics:\n"); + printk("TC Low\tAvg\tHigh (in nanoseconds)\n"); + + for (i = 0; i < NET_TC_TX_COUNT; i++) { + if (GET_STAT(ts.tx[i].time.low) == 0 && + GET_STAT(ts.tx[i].time.average) == 0 && + GET_STAT(ts.tx[i].time.high) == 0) { + continue; + } + + printk("[%d] %s %u\t%u\t%u\n", i, + priority2str(GET_STAT(tc.sent[i].priority)), + GET_STAT(ts.tx[i].time.low), + GET_STAT(ts.tx[i].time.average), + GET_STAT(ts.tx[i].time.high)); + } +#endif + +#if NET_TC_RX_COUNT > 1 + printk("RX timestamp statistics:\n"); + printk("TC Low\tAvg\tHigh (in nanoseconds)\n"); + + for (i = 0; i < NET_TC_RX_COUNT; i++) { + if (GET_STAT(ts.rx[i].time.low) == 0 && + GET_STAT(ts.rx[i].time.average) == 0 && + GET_STAT(ts.rx[i].time.high) == 0) { + continue; + } + + printk("[%d] %s %u\t%u\t%u\n", i, + priority2str(GET_STAT(tc.recv[i].priority)), + GET_STAT(ts.rx[i].time.low), + GET_STAT(ts.rx[i].time.average), + GET_STAT(ts.rx[i].time.high)); + } +#endif + } +#endif /* (NET_TC_COUNT > 1) && CONFIG_NET_PKT_TIMESTAMP */ } #endif /* CONFIG_NET_STATISTICS */ @@ -1459,6 +1556,541 @@ int net_shell_cmd_dns(int argc, char *argv[]) return 0; } +#if defined(CONFIG_NET_GPTP) +static void gptp_port_cb(int port, struct net_if *iface, void *user_data) +{ + int *count = user_data; + + if (*count == 0) { + printk("Port Interface\n"); + } + + (*count)++; + + printk("%2d %p\n", port, iface); +} + +static const char *pdelay_req2str(enum gptp_pdelay_req_states state) +{ + switch (state) { + case GPTP_PDELAY_REQ_NOT_ENABLED: + return "REQ_NOT_ENABLED"; + case GPTP_PDELAY_REQ_INITIAL_SEND_REQ: + return "INITIAL_SEND_REQ"; + case GPTP_PDELAY_REQ_RESET: + return "REQ_RESET"; + case GPTP_PDELAY_REQ_SEND_REQ: + return "SEND_REQ"; + case GPTP_PDELAY_REQ_WAIT_RESP: + return "WAIT_RESP"; + case GPTP_PDELAY_REQ_WAIT_FOLLOW_UP: + return "WAIT_FOLLOW_UP"; + case GPTP_PDELAY_REQ_WAIT_ITV_TIMER: + return "WAIT_ITV_TIMER"; + } + + 
return ""; +}; + +static const char *pdelay_resp2str(enum gptp_pdelay_resp_states state) +{ + switch (state) { + case GPTP_PDELAY_RESP_NOT_ENABLED: + return "RESP_NOT_ENABLED"; + case GPTP_PDELAY_RESP_INITIAL_WAIT_REQ: + return "INITIAL_WAIT_REQ"; + case GPTP_PDELAY_RESP_WAIT_REQ: + return "WAIT_REQ"; + case GPTP_PDELAY_RESP_WAIT_TSTAMP: + return "WAIT_TSTAMP"; + } + + return ""; +} + +static const char *sync_rcv2str(enum gptp_sync_rcv_states state) +{ + switch (state) { + case GPTP_SYNC_RCV_DISCARD: + return "DISCARD"; + case GPTP_SYNC_RCV_WAIT_SYNC: + return "WAIT_SYNC"; + case GPTP_SYNC_RCV_WAIT_FOLLOW_UP: + return "WAIT_FOLLOW_UP"; + } + + return ""; +} + +static const char *sync_send2str(enum gptp_sync_send_states state) +{ + switch (state) { + case GPTP_SYNC_SEND_INITIALIZING: + return "INITIALIZING"; + case GPTP_SYNC_SEND_SEND_SYNC: + return "SEND_SYNC"; + case GPTP_SYNC_SEND_SEND_FUP: + return "SEND_FUP"; + } + + return ""; +} + +static const char *pss_rcv2str(enum gptp_pss_rcv_states state) +{ + switch (state) { + case GPTP_PSS_RCV_DISCARD: + return "DISCARD"; + case GPTP_PSS_RCV_RECEIVED_SYNC: + return "RECEIVED_SYNC"; + } + + return ""; +} + +static const char *pss_send2str(enum gptp_pss_send_states state) +{ + switch (state) { + case GPTP_PSS_SEND_TRANSMIT_INIT: + return "TRANSMIT_INIT"; + case GPTP_PSS_SEND_SYNC_RECEIPT_TIMEOUT: + return "SYNC_RECEIPT_TIMEOUT"; + case GPTP_PSS_SEND_SEND_MD_SYNC: + return "SEND_MD_SYNC"; + case GPTP_PSS_SEND_SET_SYNC_RECEIPT_TIMEOUT: + return "SET_SYNC_RECEIPT_TIMEOUT"; + } + + return ""; +} + +static const char *pa_rcv2str(enum gptp_pa_rcv_states state) +{ + switch (state) { + case GPTP_PA_RCV_DISCARD: + return "DISCARD"; + case GPTP_PA_RCV_RECEIVE: + return "RECEIVE"; + } + + return ""; +}; + +static const char *pa_info2str(enum gptp_pa_info_states state) +{ + switch (state) { + case GPTP_PA_INFO_DISABLED: + return "DISABLED"; + case GPTP_PA_INFO_POST_DISABLED: + return "POST_DISABLED"; + case GPTP_PA_INFO_AGED: + 
return "AGED"; + case GPTP_PA_INFO_UPDATE: + return "UPDATE"; + case GPTP_PA_INFO_CURRENT: + return "CURRENT"; + case GPTP_PA_INFO_RECEIVE: + return "RECEIVE"; + case GPTP_PA_INFO_SUPERIOR_MASTER_PORT: + return "SUPERIOR_MASTER_PORT"; + case GPTP_PA_INFO_REPEATED_MASTER_PORT: + return "REPEATED_MASTER_PORT"; + case GPTP_PA_INFO_INFERIOR_MASTER_OR_OTHER_PORT: + return "INFERIOR_MASTER_OR_OTHER_PORT"; + } + + return ""; +}; + +static const char *pa_transmit2str(enum gptp_pa_transmit_states state) +{ + switch (state) { + case GPTP_PA_TRANSMIT_INIT: + return "INIT"; + case GPTP_PA_TRANSMIT_PERIODIC: + return "PERIODIC"; + case GPTP_PA_TRANSMIT_IDLE: + return "IDLE"; + case GPTP_PA_TRANSMIT_POST_IDLE: + return "POST_IDLE"; + } + + return ""; +}; + +static const char *site_sync2str(enum gptp_site_sync_sync_states state) +{ + switch (state) { + case GPTP_SSS_INITIALIZING: + return "INITIALIZING"; + case GPTP_SSS_RECEIVING_SYNC: + return "RECEIVING_SYNC"; + } + + return ""; +} + +static const char *clk_slave2str(enum gptp_clk_slave_sync_states state) +{ + switch (state) { + case GPTP_CLK_SLAVE_SYNC_INITIALIZING: + return "INITIALIZING"; + case GPTP_CLK_SLAVE_SYNC_SEND_SYNC_IND: + return "SEND_SYNC_IND"; + } + + return ""; +}; + +static const char *pr_selection2str(enum gptp_pr_selection_states state) +{ + switch (state) { + case GPTP_PR_SELECTION_INIT_BRIDGE: + return "INIT_BRIDGE"; + case GPTP_PR_SELECTION_ROLE_SELECTION: + return "ROLE_SELECTION"; + } + + return ""; +}; + +static const char *cms_rcv2str(enum gptp_cms_rcv_states state) +{ + switch (state) { + case GPTP_CMS_RCV_INITIALIZING: + return "INITIALIZING"; + case GPTP_CMS_RCV_WAITING: + return "WAITING"; + case GPTP_CMS_RCV_SOURCE_TIME: + return "SOURCE_TIME"; + } + + return ""; +}; + +static void gptp_print_port_info(int port) +{ + struct gptp_port_bmca_data *port_bmca_data; + struct gptp_port_param_ds *port_param_ds; + struct gptp_port_states *port_state; + struct gptp_port_ds *port_ds; + struct net_if *iface; 
+ int ret, i; + + ret = gptp_get_port_data(gptp_get_domain(), + port, + &port_ds, + &port_param_ds, + &port_state, + &port_bmca_data, + &iface); + if (ret < 0) { + printk("Cannot get gPTP information for port %d (%d)\n", + port, ret); + return; + } + + printk("Port id : %d\n", port_ds->port_id.port_number); + + printk("Clock id : "); + for (i = 0; i < sizeof(port_ds->port_id.clk_id); i++) { + printk("%02x", port_ds->port_id.clk_id[i]); + + if (i != (sizeof(port_ds->port_id.clk_id) - 1)) { + printk(":"); + } + } + printk("\n"); + + printk("Version : %d\n", port_ds->version); + printk("AS capable : %s\n", port_ds->as_capable ? "yes" : "no"); + + printk("\nConfiguration:\n"); + printk("Time synchronization and Best Master Selection enabled " + ": %s\n", port_ds->ptt_port_enabled ? "yes" : "no"); + printk("The port is measuring the path delay " + ": %s\n", port_ds->is_measuring_delay ? "yes" : "no"); + printk("One way propagation time on %s : %u\n", + "the link attached to this port", + (u32_t)port_ds->neighbor_prop_delay); + printk("Propagation time threshold for %s : %u\n", + "the link attached to this port", + (u32_t)port_ds->neighbor_prop_delay_thresh); + printk("Estimate of the ratio of the frequency with the peer " + ": %u\n", (u32_t)port_ds->neighbor_rate_ratio); + printk("Asymmetry on the link relative to the grand master time base " + ": %lld\n", port_ds->delay_asymmetry); + printk("Maximum interval between sync %s " + ": %llu\n", "messages", port_ds->sync_receipt_timeout_time_itv); + printk("Maximum number of Path Delay Requests without a response " + ": %d\n", port_ds->allowed_lost_responses); + printk("Current Sync %s : %d\n", + "sequence id for this port", port_ds->sync_seq_id); + printk("Current Path Delay Request %s : %d\n", + "sequence id for this port", port_ds->pdelay_req_seq_id); + printk("Current Announce %s : %d\n", + "sequence id for this port", port_ds->announce_seq_id); + printk("Current Signaling %s : %d\n", + "sequence id for this port", 
port_ds->signaling_seq_id); + printk("Whether neighborRateRatio %s : %s\n", + "needs to be computed for this port", + port_ds->compute_neighbor_rate_ratio ? "yes" : "no"); + printk("Whether neighborPropDelay %s : %s\n", + "needs to be computed for this port", + port_ds->compute_neighbor_prop_delay ? "yes" : "no"); + printk("Initial Announce Interval %s : %d\n", + "as a Logarithm to base 2", port_ds->ini_log_announce_itv); + printk("Current Announce Interval %s : %d\n", + "as a Logarithm to base 2", port_ds->cur_log_announce_itv); + printk("Initial Sync Interval %s : %d\n", + "as a Logarithm to base 2", port_ds->ini_log_half_sync_itv); + printk("Current Sync Interval %s : %d\n", + "as a Logarithm to base 2", port_ds->cur_log_half_sync_itv); + printk("Initial Path Delay Request Interval %s : %d\n", + "as a Logarithm to base 2", port_ds->ini_log_pdelay_req_itv); + printk("Current Path Delay Request Interval %s : %d\n", + "as a Logarithm to base 2", port_ds->cur_log_pdelay_req_itv); + printk("Time without receiving announce %s %s : %d\n", + "messages", "before running BMCA", + port_ds->announce_receipt_timeout); + printk("Time without receiving sync %s %s : %d\n", + "messages", "before running BMCA", + port_ds->sync_receipt_timeout); + printk("Sync event %s : %u.%llu\n", + "transmission interval for the port", + port_ds->half_sync_itv.high, + port_ds->half_sync_itv.low); + printk("Path Delay Request %s : %u.%llu\n", + "transmission interval for the port", + port_ds->pdelay_req_itv.high, + port_ds->pdelay_req_itv.low); + + printk("\nRuntime status:\n"); + printk("Path Delay Request state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", pdelay_req2str(port_state->pdelay_req.state)); + printk("\tInitial Path Delay Response Peer Timestamp " + ": %llu\n", port_state->pdelay_req.ini_resp_evt_tstamp); + printk("\tInitial Path Delay Response Ingress Timestamp " + ": %llu\n", port_state->pdelay_req.ini_resp_ingress_tstamp); + printk("\tPath Delay Response %s %s : 
%u\n", + "messages", "received", + port_state->pdelay_req.rcvd_pdelay_resp); + printk("\tPath Delay Follow Up %s %s : %u\n", + "messages", "received", + port_state->pdelay_req.rcvd_pdelay_follow_up); + printk("\tNumber of lost Path Delay Responses " + ": %u\n", port_state->pdelay_req.lost_responses); + printk("\tTimer expired send a new Path Delay Request " + ": %u\n", port_state->pdelay_req.pdelay_timer_expired); + printk("\tNeighborRateRatio has been computed successfully " + ": %u\n", port_state->pdelay_req.neighbor_rate_ratio_valid); + printk("\tPath Delay has already been computed after init " + ": %u\n", port_state->pdelay_req.init_pdelay_compute); + printk("\tCount consecutive reqs with multiple responses " + ": %u\n", port_state->pdelay_req.multiple_resp_count); + + printk("Path Delay Response state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", pdelay_resp2str(port_state->pdelay_resp.state)); + + printk("Sync Receive state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", sync_rcv2str(port_state->sync_rcv.state)); + printk("\tA Sync %s %s : %s\n", + "Message", "has been received", + port_state->sync_rcv.rcvd_sync ? "yes" : "no"); + printk("\tA Follow Up %s %s : %s\n", + "Message", "has been received", + port_state->sync_rcv.rcvd_follow_up ? "yes" : "no"); + printk("\tA Follow Up %s %s : %s\n", + "Message", "timeout has expired", + port_state->sync_rcv.follow_up_timeout_expired ? "yes" : "no"); + printk("\tTime at which a Sync %s without Follow Up\n" + "\t will be discarded " + ": %llu\n", "Message", + port_state->sync_rcv.follow_up_receipt_timeout); + + printk("Sync Send state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", sync_send2str(port_state->sync_send.state)); + printk("\tA MDSyncSend structure %s : %s\n", + "has been received", + port_state->sync_send.rcvd_md_sync ?
"yes" : "no"); + printk("\tThe timestamp for the sync msg %s : %s\n", + "has been received", + port_state->sync_send.md_sync_timestamp_avail ? "yes" : "no"); + + printk("PortSyncSync Receive state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", pss_rcv2str(port_state->pss_rcv.state)); + printk("\tGrand Master / Local Clock frequency ratio " + ": %f\n", port_state->pss_rcv.rate_ratio); + printk("\tA MDSyncReceive struct is ready to be processed " + ": %s\n", port_state->pss_rcv.rcvd_md_sync ? "yes" : "no"); + printk("\tExpiry of SyncReceiptTimeoutTimer : %s\n", + port_state->pss_rcv.sync_receipt_timeout_timer_expired ? + "yes" : "no"); + + printk("PortSyncSync Send state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", pss_send2str(port_state->pss_send.state)); + printk("\tFollow Up Correction Field of last recv PSS " + ": %lld\n", + port_state->pss_send.last_follow_up_correction_field); + printk("\tUpstream Tx Time of the last recv PortSyncSync " + ": %llu\n", port_state->pss_send.last_upstream_tx_time); + printk("\tSync Receipt Timeout Time of last recv PSS " + ": %llu\n", + port_state->pss_send.last_sync_receipt_timeout_time); + printk("\tRate Ratio of the last received PortSyncSync " + ": %f\n", + port_state->pss_send.last_rate_ratio); + printk("\tGM Freq Change of the last received PortSyncSync " + ": %f\n", port_state->pss_send.last_gm_freq_change); + printk("\tGM Time Base Indicator of last recv PortSyncSync " + ": %d\n", port_state->pss_send.last_gm_time_base_indicator); + printk("\tReceived Port Number of last recv PortSyncSync " + ": %d\n", + port_state->pss_send.last_rcvd_port_num); + printk("\tPortSyncSync structure is ready to be processed " + ": %s\n", port_state->pss_send.rcvd_pss_sync ? "yes" : "no"); + printk("\tFlag when the %s has expired : %s\n", + "half_sync_itv_timer", + port_state->pss_send.half_sync_itv_timer_expired ? 
+ "yes" : "no"); + printk("\tHas %s expired twice : %s\n", + "half_sync_itv_timer", + port_state->pss_send.sync_itv_timer_expired ? "yes" : "no"); + printk("\tHas syncReceiptTimeoutTime expired " + ": %s\n", + port_state->pss_send.sync_receipt_timeout_timer_expired ? + "yes" : "no"); + + printk("PortAnnounce Receive state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", pa_rcv2str(port_state->pa_rcv.state)); + printk("\tAn announce message is ready to be processed " + ": %s\n", + port_state->pa_rcv.rcvd_announce ? "yes" : "no"); + + printk("PortAnnounce Information state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", pa_info2str(port_state->pa_info.state)); + printk("\tExpired announce information " + ": %s\n", port_state->pa_info.ann_expired ? "yes" : "no"); + + printk("PortAnnounce Transmit state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", pa_transmit2str(port_state->pa_transmit.state)); + printk("\tTrigger announce information " + ": %s\n", port_state->pa_transmit.ann_trigger ? 
"yes" : "no"); + +#if defined(CONFIG_NET_GPTP_STATISTICS) + printk("\nStatistics:\n"); + printk("Sync %s %s : %u\n", + "messages", "received", port_param_ds->rx_sync_count); + printk("Follow Up %s %s : %u\n", + "messages", "received", port_param_ds->rx_fup_count); + printk("Path Delay Request %s %s : %u\n", + "messages", "received", port_param_ds->rx_pdelay_req_count); + printk("Path Delay Response %s %s : %u\n", + "messages", "received", port_param_ds->rx_pdelay_resp_count); + printk("PDelay %s threshold exceeded : %u\n", + "messages", port_param_ds->neighbor_prop_delay_exceeded); + printk("Path Delay Follow Up %s %s : %u\n", + "messages", "received", port_param_ds->rx_pdelay_resp_fup_count); + printk("Announce %s %s : %u\n", + "messages", "received", port_param_ds->rx_announce_count); + printk("ptp %s discarded : %u\n", + "messages", port_param_ds->rx_ptp_packet_discard_count); + printk("Sync %s %s : %u\n", + "reception", "timeout", + port_param_ds->sync_receipt_timeout_count); + printk("Announce %s %s : %u\n", + "reception", "timeout", + port_param_ds->announce_receipt_timeout_count); + printk("Path Delay Requests without a response " + ": %u\n", port_param_ds->pdelay_allowed_lost_resp_exceed_count); + printk("Sync %s %s : %u\n", + "messages", "sent", port_param_ds->tx_sync_count); + printk("Follow Up %s %s : %u\n", + "messages", "sent", port_param_ds->tx_fup_count); + printk("Path Delay Request %s %s : %u\n", + "messages", "sent", port_param_ds->tx_pdelay_req_count); + printk("Path Delay Response %s %s : %u\n", + "messages", "sent", port_param_ds->tx_pdelay_resp_count); + printk("Path Delay Response %s %s : %u\n", + "messages", "sent", port_param_ds->tx_pdelay_resp_fup_count); + printk("Announce %s %s : %u\n", + "messages", "sent", port_param_ds->tx_announce_count); +#endif /* CONFIG_NET_GPTP_STATISTICS */ +} +#endif /* CONFIG_NET_GPTP */ + +int net_shell_cmd_gptp(int argc, char *argv[]) +{ +#if defined(CONFIG_NET_GPTP) + /* gPTP status */ + struct gptp_domain 
*domain = gptp_get_domain(); + int count = 0; + int arg = 1; + + if (strcmp(argv[0], "gptp")) { + arg++; + } + + if (argv[arg]) { + int port = strtol(argv[arg], NULL, 10); + + gptp_print_port_info(port); + } else { + gptp_foreach_port(gptp_port_cb, &count); + + printk("\n"); + + printk("SiteSyncSync state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", site_sync2str(domain->state.site_ss.state)); + printk("\tA PortSyncSync struct is ready " + ": %s\n", domain->state.site_ss.rcvd_pss ? "yes" : "no"); + + printk("ClockSlaveSync state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", + clk_slave2str(domain->state.clk_slave_sync.state)); + printk("\tA PortSyncSync struct is ready " + ": %s\n", + domain->state.clk_slave_sync.rcvd_pss ? "yes" : "no"); + printk("\tThe local clock has expired " + ": %s\n", + domain->state.clk_slave_sync.rcvd_local_clk_tick ? + "yes" : "no"); + + printk("PortRoleSelection state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", + pr_selection2str(domain->state.pr_sel.state)); + + printk("ClockMasterSyncReceive state machine variables:\n"); + printk("\tCurrent state " + ": %s\n", cms_rcv2str( + domain->state.clk_master_sync_receive.state)); + printk("\tA ClockSourceTime " + ": %s\n", + domain->state.clk_master_sync_receive.rcvd_clock_source_req ? + "yes" : "no"); + printk("\tThe local clock has expired " + ": %s\n", + domain->state.clk_master_sync_receive.rcvd_local_clock_tick ? 
+ "yes" : "no"); + } +#else + printk("gPTP not supported, set CONFIG_NET_GPTP to enable it.\n"); +#endif + return 0; +} + #if defined(CONFIG_NET_DEBUG_HTTP_CONN) && defined(CONFIG_HTTP_SERVER) #define MAX_HTTP_OUTPUT_LEN 64 static char *http_str_output(char *output, int outlen, const char *str, int len) @@ -1552,14 +2184,75 @@ int net_shell_cmd_http(int argc, char *argv[]) int net_shell_cmd_iface(int argc, char *argv[]) { - ARG_UNUSED(argc); - ARG_UNUSED(argv); + int arg = 0; + if (strcmp(argv[arg], "iface") == 0) { + arg++; + } + + if (argv[arg]) { + bool up = false; + struct net_if *iface; + char *endptr = NULL; + int idx, ret; + + if (strcmp(argv[arg], "up") == 0) { + arg++; + up = true; + } else if (strcmp(argv[arg], "down") == 0) { + arg++; + } + + if (!argv[arg]) { + printk("Usage: net iface [up|down] [index]\n"); + return 0; + } + + idx = strtol(argv[arg], &endptr, 10); + if (*endptr != '\0') { + printk("Invalid index %s\n", argv[arg]); + return 0; + } + + if (idx < 0 || idx > 255) { + printk("Invalid index %d\n", idx); + return 0; + } + + iface = net_if_get_by_index(idx); + if (!iface) { + printk("No such interface in index %d\n", idx); + return 0; + } + + if (up) { + if (net_if_is_up(iface)) { + printk("Interface %d is already up.\n", idx); + return 0; + } + + ret = net_if_up(iface); + if (ret) { + printk("Cannot take interface %d up (%d)\n", + idx, ret); + } else { + printk("Interface %d is up\n", idx); + } + } else { + ret = net_if_down(iface); + if (ret) { + printk("Cannot take interface %d down (%d)\n", + idx, ret); + } else { + printk("Interface %d is down\n", idx); + } + } + } else { #if defined(CONFIG_NET_HOSTNAME_ENABLE) - printk("Hostname: %s\n\n", net_hostname_get()); + printk("Hostname: %s\n\n", net_hostname_get()); #endif - - net_if_foreach(iface_cb, NULL); + net_if_foreach(iface_cb, NULL); + } return 0; } @@ -1922,10 +2615,11 @@ static int _ping_ipv4(char *host) net_icmpv4_register_handler(&ping4_handler); - ret = 
net_icmpv4_send_echo_request(net_if_get_default(), - &ipv4_target, - sys_rand32_get(), - sys_rand32_get()); + ret = net_icmpv4_send_echo_request( + net_if_ipv4_select_src_iface(&ipv4_target), + &ipv4_target, + sys_rand32_get(), + sys_rand32_get()); if (ret) { _remove_ipv4_ping_handler(); } else { @@ -2280,14 +2974,56 @@ int net_shell_cmd_stacks(int argc, char *argv[]) return 0; } +#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE) +static void net_shell_print_statistics_all(void) +{ + net_if_foreach(net_shell_print_statistics, NULL); +} +#endif + int net_shell_cmd_stats(int argc, char *argv[]) { +#if defined(CONFIG_NET_STATISTICS) + int arg = 0; + + if (strcmp(argv[arg], "stats") == 0) { + arg++; + } + + if (argv[arg]) { +#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE) + if (strcmp(argv[arg], "all") == 0) { + net_shell_print_statistics_all(); + } else { + struct net_if *iface; + char *endptr = NULL; + int idx; + + idx = strtol(argv[arg], &endptr, 10); + if (*endptr != '\0') { + printk("Invalid index %s\n", argv[arg]); + return 0; + } + + iface = net_if_get_by_index(idx); + if (!iface) { + printk("No such interface in index %d\n", idx); + return 0; + } + + net_shell_print_statistics(iface, NULL); + } +#else + printk("Per network interface statistics not collected.\n"); + printk("Please enable CONFIG_NET_STATISTICS_PER_INTERFACE\n"); +#endif + } else { + net_shell_print_statistics(NULL, NULL); + } +#else ARG_UNUSED(argc); ARG_UNUSED(argv); -#if defined(CONFIG_NET_STATISTICS) - net_shell_print_statistics(); -#else printk("Network statistics not compiled in.\n"); #endif @@ -2590,6 +3326,152 @@ int net_shell_cmd_tcp(int argc, char *argv[]) return 0; } +#if defined(CONFIG_NET_VLAN) +static void iface_vlan_del_cb(struct net_if *iface, void *user_data) +{ + u16_t vlan_tag = POINTER_TO_UINT(user_data); + int ret; + + ret = net_eth_vlan_disable(iface, vlan_tag); + if (ret < 0) { + if (ret != -ESRCH) { + printk("Cannot delete VLAN tag %d from interface %p\n", + vlan_tag, 
iface); + } + + return; + } + + printk("VLAN tag %d removed from interface %p\n", + vlan_tag, iface); +} + +static void iface_vlan_cb(struct net_if *iface, void *user_data) +{ + struct ethernet_context *ctx = net_if_l2_data(iface); + int *count = user_data; + int i; + + if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) { + return; + } + + if (*count == 0) { + printk(" Interface Type Tag\n"); + } + + if (!ctx->vlan_enabled) { + printk("VLAN tag(s) not set\n"); + return; + } + + for (i = 0; i < NET_VLAN_MAX_COUNT; i++) { + if (!ctx->vlan[i].iface || ctx->vlan[i].iface != iface) { + continue; + } + + if (ctx->vlan[i].tag == NET_VLAN_TAG_UNSPEC) { + continue; + } + + printk("[%d] %p %s %d\n", net_if_get_by_iface(iface), iface, + iface2str(iface, NULL), ctx->vlan[i].tag); + + break; + } + + (*count)++; +} +#endif /* CONFIG_NET_VLAN */ + +int net_shell_cmd_vlan(int argc, char *argv[]) +{ +#if defined(CONFIG_NET_VLAN) + int arg = 1; + int ret; + u16_t tag; + + if (argv[arg]) { + if (!strcmp(argv[arg], "add")) { + /* vlan add */ + struct net_if *iface; + u32_t iface_idx; + + if (!argv[++arg]) { + printk("VLAN tag missing.\n"); + return 0; + } + + tag = strtol(argv[arg], NULL, 10); + + if (!argv[++arg]) { + printk("Network interface index missing.\n"); + return 0; + } + + iface_idx = strtol(argv[arg], NULL, 10); + + iface = net_if_get_by_index(iface_idx); + if (!iface) { + printk("Network interface index %d is " + "invalid.\n", iface_idx); + return 0; + } + + if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) { + printk("Network interface %p is not ethernet " + "interface\n", iface); + return 0; + } + + ret = net_eth_vlan_enable(iface, tag); + if (ret < 0) { + if (ret == -ENOENT) { + printk("No IP address configured.\n"); + } + + printk("Cannot set VLAN tag (%d)\n", ret); + + return 0; + } + + printk("VLAN tag %d set to interface %p\n", tag, + iface); + return 0; + } + + if (!strcmp(argv[arg], "del")) { + /* vlan del */ + + if (!argv[++arg]) { + printk("VLAN tag 
missing.\n"); + return 0; + } + + tag = strtol(argv[arg], NULL, 10); + + net_if_foreach(iface_vlan_del_cb, + UINT_TO_POINTER((u32_t)tag)); + + return 0; + } + + printk("Unknown command '%s'\n", argv[arg]); + printk("Usage:\n"); + printk("\tvlan add \n"); + printk("\tvlan del \n"); + } else { + int count = 0; + + net_if_foreach(iface_vlan_cb, &count); + } +#else + printk("Set CONFIG_NET_VLAN to enable virtual LAN support.\n"); +#endif /* CONFIG_NET_VLAN */ + + return 0; +} + static struct shell_cmd net_commands[] = { /* Keep the commands in alphabetical order */ { "allocs", net_shell_cmd_allocs, @@ -2605,12 +3487,17 @@ static struct shell_cmd net_commands[] = { "dns cancel\n\tCancel all pending requests\n" "dns [A or AAAA]\n\tQuery IPv4 address (default) or " "IPv6 address for a host name" }, + { "gptp", net_shell_cmd_gptp, + "\n\tPrint information about gPTP support\n" + "gptp \n\tPrint detailed information about gPTP port" }, { "http", net_shell_cmd_http, "\n\tPrint information about active HTTP connections\n" "http monitor\n\tStart monitoring HTTP connections\n" "http\n\tTurn off HTTP connection monitoring" }, { "iface", net_shell_cmd_iface, - "\n\tPrint information about network interfaces" }, + "\n\tPrint information about network interfaces\n" + "iface up [idx]\n\tTake network interface up\n" + "iface down [idx]\n\tTake network interface down" }, { "mem", net_shell_cmd_mem, "\n\tPrint information about network interfaces" }, { "nbr", net_shell_cmd_nbr, "\n\tPrint neighbor information\n" @@ -2620,10 +3507,20 @@ static struct shell_cmd net_commands[] = { { "rpl", net_shell_cmd_rpl, "\n\tShow RPL mesh routing status" }, { "stacks", net_shell_cmd_stacks, "\n\tShow network stacks information" }, - { "stats", net_shell_cmd_stats, "\n\tShow network statistics" }, + { "stats", net_shell_cmd_stats, + "\n\tShow network statistics\n" + "stats all\n\tShow network statistics for all network " + "interfaces\n" + "stats \n\tShow network statistics for one specific " + 
"network interfaces\n" }, { "tcp", net_shell_cmd_tcp, "connect port\n\tConnect to TCP peer\n" "tcp send \n\tSend data to peer using TCP\n" "tcp close\n\tClose TCP connection" }, + { "vlan", net_shell_cmd_vlan, "\n\tShow VLAN information\n" + "vlan add \n" + "\tAdd VLAN tag to the network interface\n" + "vlan del \n" + "\tDelete VLAN tag from the network interface\n" }, { NULL, NULL, NULL } }; diff --git a/subsys/net/ip/net_stats.c b/subsys/net/ip/net_stats.c index 9e99684d6eff4..a7a8b2a325e64 100644 --- a/subsys/net/ip/net_stats.c +++ b/subsys/net/ip/net_stats.c @@ -14,136 +14,208 @@ #include #include #include +#include #include "net_stats.h" -struct net_stats net_stats; +/* Global network statistics. + * + * The variable needs to be global so that the GET_STAT() macro can access it + * from net_shell.c + */ +struct net_stats net_stats = { 0 }; + +#if defined(CONFIG_NET_STATISTICS_PERIODIC_OUTPUT) + +#define PRINT_STATISTICS_INTERVAL K_SECONDS(30) -#ifdef CONFIG_NET_STATISTICS_PERIODIC_OUTPUT +#if NET_TC_COUNT > 1 +static const char *priority2str(enum net_priority priority) +{ + switch (priority) { + case NET_PRIORITY_BK: + return "BK"; /* Background */ + case NET_PRIORITY_BE: + return "BE"; /* Best effort */ + case NET_PRIORITY_EE: + return "EE"; /* Excellent effort */ + case NET_PRIORITY_CA: + return "CA"; /* Critical applications */ + case NET_PRIORITY_VI: + return "VI"; /* Video, < 100 ms latency and jitter */ + case NET_PRIORITY_VO: + return "VO"; /* Voice, < 10 ms latency and jitter */ + case NET_PRIORITY_IC: + return "IC"; /* Internetwork control */ + case NET_PRIORITY_NC: + return "NC"; /* Network control */ + } -#define PRINT_STATISTICS_INTERVAL (30 * MSEC_PER_SEC) + return "??"; +} +#endif -static inline void stats(void) +static inline void stats(struct net_if *iface) { static s64_t next_print; s64_t curr = k_uptime_get(); + int i; if (!next_print || (next_print < curr && (!((curr - next_print) > PRINT_STATISTICS_INTERVAL)))) { s64_t new_print; + if 
(iface) { + NET_INFO("Interface %p [%d]", iface, + net_if_get_by_iface(iface)); + } else { + NET_INFO("Global statistics:"); + } + #if defined(CONFIG_NET_STATISTICS_IPV6) NET_INFO("IPv6 recv %d\tsent\t%d\tdrop\t%d\tforwarded\t%d", - GET_STAT(ipv6.recv), - GET_STAT(ipv6.sent), - GET_STAT(ipv6.drop), - GET_STAT(ipv6.forwarded)); + GET_STAT(iface, ipv6.recv), + GET_STAT(iface, ipv6.sent), + GET_STAT(iface, ipv6.drop), + GET_STAT(iface, ipv6.forwarded)); #if defined(CONFIG_NET_STATISTICS_IPV6_ND) NET_INFO("IPv6 ND recv %d\tsent\t%d\tdrop\t%d", - GET_STAT(ipv6_nd.recv), - GET_STAT(ipv6_nd.sent), - GET_STAT(ipv6_nd.drop)); + GET_STAT(iface, ipv6_nd.recv), + GET_STAT(iface, ipv6_nd.sent), + GET_STAT(iface, ipv6_nd.drop)); #endif /* CONFIG_NET_STATISTICS_IPV6_ND */ #if defined(CONFIG_NET_STATISTICS_MLD) NET_INFO("IPv6 MLD recv %d\tsent\t%d\tdrop\t%d", - GET_STAT(ipv6_mld.recv), - GET_STAT(ipv6_mld.sent), - GET_STAT(ipv6_mld.drop)); + GET_STAT(iface, ipv6_mld.recv), + GET_STAT(iface, ipv6_mld.sent), + GET_STAT(iface, ipv6_mld.drop)); #endif /* CONFIG_NET_STATISTICS_MLD */ #endif /* CONFIG_NET_STATISTICS_IPV6 */ #if defined(CONFIG_NET_STATISTICS_IPV4) NET_INFO("IPv4 recv %d\tsent\t%d\tdrop\t%d\tforwarded\t%d", - GET_STAT(ipv4.recv), - GET_STAT(ipv4.sent), - GET_STAT(ipv4.drop), - GET_STAT(ipv4.forwarded)); + GET_STAT(iface, ipv4.recv), + GET_STAT(iface, ipv4.sent), + GET_STAT(iface, ipv4.drop), + GET_STAT(iface, ipv4.forwarded)); #endif /* CONFIG_NET_STATISTICS_IPV4 */ NET_INFO("IP vhlerr %d\thblener\t%d\tlblener\t%d", - GET_STAT(ip_errors.vhlerr), - GET_STAT(ip_errors.hblenerr), - GET_STAT(ip_errors.lblenerr)); + GET_STAT(iface, ip_errors.vhlerr), + GET_STAT(iface, ip_errors.hblenerr), + GET_STAT(iface, ip_errors.lblenerr)); NET_INFO("IP fragerr %d\tchkerr\t%d\tprotoer\t%d", - GET_STAT(ip_errors.fragerr), - GET_STAT(ip_errors.chkerr), - GET_STAT(ip_errors.protoerr)); + GET_STAT(iface, ip_errors.fragerr), + GET_STAT(iface, ip_errors.chkerr), + GET_STAT(iface, 
ip_errors.protoerr)); NET_INFO("ICMP recv %d\tsent\t%d\tdrop\t%d", - GET_STAT(icmp.recv), - GET_STAT(icmp.sent), - GET_STAT(icmp.drop)); + GET_STAT(iface, icmp.recv), + GET_STAT(iface, icmp.sent), + GET_STAT(iface, icmp.drop)); NET_INFO("ICMP typeer %d\tchkerr\t%d", - GET_STAT(icmp.typeerr), - GET_STAT(icmp.chkerr)); + GET_STAT(iface, icmp.typeerr), + GET_STAT(iface, icmp.chkerr)); #if defined(CONFIG_NET_STATISTICS_UDP) NET_INFO("UDP recv %d\tsent\t%d\tdrop\t%d", - GET_STAT(udp.recv), - GET_STAT(udp.sent), - GET_STAT(udp.drop)); + GET_STAT(iface, udp.recv), + GET_STAT(iface, udp.sent), + GET_STAT(iface, udp.drop)); NET_INFO("UDP chkerr %d", - GET_STAT(udp.chkerr)); + GET_STAT(iface, udp.chkerr)); #endif #if defined(CONFIG_NET_STATISTICS_TCP) NET_INFO("TCP bytes recv %u\tsent\t%d", - GET_STAT(tcp.bytes.received), - GET_STAT(tcp.bytes.sent)); + GET_STAT(iface, tcp.bytes.received), + GET_STAT(iface, tcp.bytes.sent)); NET_INFO("TCP seg recv %d\tsent\t%d\tdrop\t%d", - GET_STAT(tcp.recv), - GET_STAT(tcp.sent), - GET_STAT(tcp.drop)); + GET_STAT(iface, tcp.recv), + GET_STAT(iface, tcp.sent), + GET_STAT(iface, tcp.drop)); NET_INFO("TCP seg resent %d\tchkerr\t%d\tackerr\t%d", - GET_STAT(tcp.resent), - GET_STAT(tcp.chkerr), - GET_STAT(tcp.ackerr)); + GET_STAT(iface, tcp.resent), + GET_STAT(iface, tcp.chkerr), + GET_STAT(iface, tcp.ackerr)); NET_INFO("TCP seg rsterr %d\trst\t%d\tre-xmit\t%d", - GET_STAT(tcp.rsterr), - GET_STAT(tcp.rst), - GET_STAT(tcp.rexmit)); + GET_STAT(iface, tcp.rsterr), + GET_STAT(iface, tcp.rst), + GET_STAT(iface, tcp.rexmit)); NET_INFO("TCP conn drop %d\tconnrst\t%d", - GET_STAT(tcp.conndrop), - GET_STAT(tcp.connrst)); + GET_STAT(iface, tcp.conndrop), + GET_STAT(iface, tcp.connrst)); #endif #if defined(CONFIG_NET_STATISTICS_RPL) NET_INFO("RPL DIS recv %d\tsent\t%d\tdrop\t%d", - GET_STAT(rpl.dis.recv), - GET_STAT(rpl.dis.sent), - GET_STAT(rpl.dis.drop)); + GET_STAT(iface, rpl.dis.recv), + GET_STAT(iface, rpl.dis.sent), + GET_STAT(iface, rpl.dis.drop)); 
NET_INFO("RPL DIO recv %d\tsent\t%d\tdrop\t%d", - GET_STAT(rpl.dio.recv), - GET_STAT(rpl.dio.sent), - GET_STAT(rpl.dio.drop)); + GET_STAT(iface, rpl.dio.recv), + GET_STAT(iface, rpl.dio.sent), + GET_STAT(iface, rpl.dio.drop)); NET_INFO("RPL DAO recv %d\tsent\t%d\tdrop\t%d\tforwarded\t%d", - GET_STAT(rpl.dao.recv), - GET_STAT(rpl.dao.sent), - GET_STAT(rpl.dao.drop), - GET_STAT(rpl.dao.forwarded)); + GET_STAT(iface, rpl.dao.recv), + GET_STAT(iface, rpl.dao.sent), + GET_STAT(iface, rpl.dao.drop), + GET_STAT(iface, rpl.dao.forwarded)); NET_INFO("RPL DAOACK rcv %d\tsent\t%d\tdrop\t%d", - GET_STAT(rpl.dao_ack.recv), - GET_STAT(rpl.dao_ack.sent), - GET_STAT(rpl.dao_ack.drop)); + GET_STAT(iface, rpl.dao_ack.recv), + GET_STAT(iface, rpl.dao_ack.sent), + GET_STAT(iface, rpl.dao_ack.drop)); NET_INFO("RPL overflows %d\tl-repairs\t%d\tg-repairs\t%d", - GET_STAT(rpl.mem_overflows), - GET_STAT(rpl.local_repairs), - GET_STAT(rpl.global_repairs)); + GET_STAT(iface, rpl.mem_overflows), + GET_STAT(iface, rpl.local_repairs), + GET_STAT(iface, rpl.global_repairs)); NET_INFO("RPL malformed %d\tresets \t%d\tp-switch\t%d", - GET_STAT(rpl.malformed_msgs), - GET_STAT(rpl.resets), - GET_STAT(rpl.parent_switch)); + GET_STAT(iface, rpl.malformed_msgs), + GET_STAT(iface, rpl.resets), + GET_STAT(iface, rpl.parent_switch)); NET_INFO("RPL f-errors %d\tl-errors\t%d\tl-warnings\t%d", - GET_STAT(rpl.forward_errors), - GET_STAT(rpl.loop_errors), - GET_STAT(rpl.loop_warnings)); + GET_STAT(iface, rpl.forward_errors), + GET_STAT(iface, rpl.loop_errors), + GET_STAT(iface, rpl.loop_warnings)); NET_INFO("RPL r-repairs %d", - GET_STAT(rpl.root_repairs)); + GET_STAT(iface, rpl.root_repairs)); #endif /* CONFIG_NET_STATISTICS_RPL */ - NET_INFO("Bytes received %u", GET_STAT(bytes.received)); - NET_INFO("Bytes sent %u", GET_STAT(bytes.sent)); - NET_INFO("Processing err %d", GET_STAT(processing_error)); + NET_INFO("Bytes received %u", GET_STAT(iface, bytes.received)); + NET_INFO("Bytes sent %u", GET_STAT(iface, 
bytes.sent)); + NET_INFO("Processing err %d", + GET_STAT(iface, processing_error)); + +#if NET_TC_COUNT > 1 +#if NET_TC_TX_COUNT > 1 + NET_INFO("TX traffic class statistics:"); + NET_INFO("TC Priority\tSent pkts\tbytes"); + + for (i = 0; i < NET_TC_TX_COUNT; i++) { + NET_INFO("[%d] %s (%d)\t%d\t\t%d", i, + priority2str(GET_STAT(iface, + tc.sent[i].priority)), + GET_STAT(iface, tc.sent[i].priority), + GET_STAT(iface, tc.sent[i].pkts), + GET_STAT(iface, tc.sent[i].bytes)); + } +#endif + +#if NET_TC_RX_COUNT > 1 + NET_INFO("RX traffic class statistics:"); + NET_INFO("TC Priority\tRecv pkts\tbytes"); + + for (i = 0; i < NET_TC_RX_COUNT; i++) { + NET_INFO("[%d] %s (%d)\t%d\t\t%d", i, + priority2str(GET_STAT(iface, + tc.recv[i].priority)), + GET_STAT(iface, tc.recv[i].priority), + GET_STAT(iface, tc.recv[i].pkts), + GET_STAT(iface, tc.recv[i].bytes)); + } +#endif + ARG_UNUSED(i); +#endif /* NET_TC_COUNT > 1 */ new_print = curr + PRINT_STATISTICS_INTERVAL; if (new_print > curr) { @@ -156,12 +228,27 @@ static inline void stats(void) } } -void net_print_statistics(void) +void net_print_statistics_iface(struct net_if *iface) { /* In order to make the info print lines shorter, use shorter * function name. */ - stats(); + stats(iface); +} + +static void iface_cb(struct net_if *iface, void *user_data) +{ + net_print_statistics_iface(iface); +} + +void net_print_statistics_all(void) +{ + net_if_foreach(iface_cb, NULL); +} + +void net_print_statistics(void) +{ + net_print_statistics_iface(NULL); } #endif /* CONFIG_NET_STATISTICS_PERIODIC_OUTPUT */ @@ -174,66 +261,82 @@ static int net_stats_get(u32_t mgmt_request, struct net_if *iface, size_t len_chk = 0; void *src = NULL; - ARG_UNUSED(iface); - switch (NET_MGMT_GET_COMMAND(mgmt_request)) { case NET_REQUEST_STATS_CMD_GET_ALL: len_chk = sizeof(struct net_stats); +#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE) + src = iface ? 
&iface->stats : &net_stats; +#else src = &net_stats; +#endif break; case NET_REQUEST_STATS_CMD_GET_PROCESSING_ERROR: len_chk = sizeof(net_stats_t); - src = &net_stats.processing_error; + src = GET_STAT_ADDR(iface, processing_error); break; case NET_REQUEST_STATS_CMD_GET_BYTES: len_chk = sizeof(struct net_stats_bytes); - src = &net_stats.bytes; + src = GET_STAT_ADDR(iface, bytes); break; case NET_REQUEST_STATS_CMD_GET_IP_ERRORS: len_chk = sizeof(struct net_stats_ip_errors); - src = &net_stats.ip_errors; + src = GET_STAT_ADDR(iface, ip_errors); break; #if defined(CONFIG_NET_STATISTICS_IPV4) case NET_REQUEST_STATS_CMD_GET_IPV4: len_chk = sizeof(struct net_stats_ip); - src = &net_stats.ipv4; + src = GET_STAT_ADDR(iface, ipv4); break; #endif #if defined(CONFIG_NET_STATISTICS_IPV6) case NET_REQUEST_STATS_CMD_GET_IPV6: len_chk = sizeof(struct net_stats_ip); - src = &net_stats.ipv6; + src = GET_STAT_ADDR(iface, ipv6); break; #endif #if defined(CONFIG_NET_STATISTICS_IPV6_ND) case NET_REQUEST_STATS_CMD_GET_IPV6_ND: len_chk = sizeof(struct net_stats_ipv6_nd); - src = &net_stats.ipv6_nd; + src = GET_STAT_ADDR(iface, ipv6_nd); break; #endif #if defined(CONFIG_NET_STATISTICS_ICMP) case NET_REQUEST_STATS_CMD_GET_ICMP: len_chk = sizeof(struct net_stats_icmp); - src = &net_stats.icmp; + src = GET_STAT_ADDR(iface, icmp); break; #endif #if defined(CONFIG_NET_STATISTICS_UDP) case NET_REQUEST_STATS_CMD_GET_UDP: len_chk = sizeof(struct net_stats_udp); - src = &net_stats.udp; + src = GET_STAT_ADDR(iface, udp); break; #endif #if defined(CONFIG_NET_STATISTICS_TCP) case NET_REQUEST_STATS_CMD_GET_TCP: len_chk = sizeof(struct net_stats_tcp); - src = &net_stats.tcp; + src = GET_STAT_ADDR(iface, tcp); break; #endif #if defined(CONFIG_NET_STATISTICS_RPL) case NET_REQUEST_STATS_CMD_GET_RPL: len_chk = sizeof(struct net_stats_rpl); - src = &net_stats.rpl; + src = GET_STAT_ADDR(iface, rpl); break; +#endif +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + case NET_REQUEST_STATS_CMD_GET_ETHERNET: { + 
const struct ethernet_api *eth; + + if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) { + return -ENOENT; + } + + eth = net_if_get_device(iface)->driver_api; + len_chk = sizeof(struct net_stats_eth); + src = eth->stats; + break; + } #endif } @@ -241,7 +344,7 @@ static int net_stats_get(u32_t mgmt_request, struct net_if *iface, return -EINVAL; } - memcpy(src, data, len); + memcpy(data, src, len); return 0; } @@ -293,4 +396,9 @@ NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_STATS_GET_RPL, net_stats_get); #endif +#if defined(CONFIG_NET_STATISTICS_ETHERNET) +NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_STATS_GET_ETHERNET, + net_stats_get); +#endif + #endif /* CONFIG_NET_STATISTICS_USER_API */ diff --git a/subsys/net/ip/net_stats.h b/subsys/net/ip/net_stats.h index 7808b64273b94..b29bbff8d720c 100644 --- a/subsys/net/ip/net_stats.h +++ b/subsys/net/ip/net_stats.h @@ -11,428 +11,560 @@ #include #include +#include extern struct net_stats net_stats; -#define GET_STAT(s) net_stats.s +#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE) +#define SET_STAT(cmd) cmd +#define GET_STAT(iface, s) (iface ? iface->stats.s : net_stats.s) +#define GET_STAT_ADDR(iface, s) (iface ? 
&iface->stats.s : &net_stats.s) +#else +#define SET_STAT(cmd) +#define GET_STAT(iface, s) net_stats.s +#define GET_STAT_ADDR(iface, s) &GET_STAT(iface, s) +#endif /* Core stats */ -static inline void net_stats_update_processing_error(void) +static inline void net_stats_update_processing_error(struct net_if *iface) { + SET_STAT(iface->stats.processing_error++); net_stats.processing_error++; } -static inline void net_stats_update_ip_errors_protoerr(void) +static inline void net_stats_update_ip_errors_protoerr(struct net_if *iface) { + SET_STAT(iface->stats.ip_errors.protoerr++); net_stats.ip_errors.protoerr++; } -static inline void net_stats_update_ip_errors_vhlerr(void) +static inline void net_stats_update_ip_errors_vhlerr(struct net_if *iface) { + SET_STAT(iface->stats.ip_errors.vhlerr++); net_stats.ip_errors.vhlerr++; } -static inline void net_stats_update_bytes_recv(u32_t bytes) +static inline void net_stats_update_bytes_recv(struct net_if *iface, + u32_t bytes) { + SET_STAT(iface->stats.bytes.received += bytes); net_stats.bytes.received += bytes; } -static inline void net_stats_update_bytes_sent(u32_t bytes) +static inline void net_stats_update_bytes_sent(struct net_if *iface, + u32_t bytes) { + SET_STAT(iface->stats.bytes.sent += bytes); net_stats.bytes.sent += bytes; } #else -#define net_stats_update_processing_error() -#define net_stats_update_ip_errors_protoerr() -#define net_stats_update_ip_errors_vhlerr() -#define net_stats_update_bytes_recv(...) -#define net_stats_update_bytes_sent(...) 
+#define net_stats_update_processing_error(iface) +#define net_stats_update_ip_errors_protoerr(iface) +#define net_stats_update_ip_errors_vhlerr(iface) +#define net_stats_update_bytes_recv(iface, bytes) +#define net_stats_update_bytes_sent(iface, bytes) #endif /* CONFIG_NET_STATISTICS */ #if defined(CONFIG_NET_STATISTICS_IPV6) /* IPv6 stats */ -static inline void net_stats_update_ipv6_sent(void) +static inline void net_stats_update_ipv6_sent(struct net_if *iface) { + SET_STAT(iface->stats.ipv6.sent++); net_stats.ipv6.sent++; } -static inline void net_stats_update_ipv6_recv(void) +static inline void net_stats_update_ipv6_recv(struct net_if *iface) { + SET_STAT(iface->stats.ipv6.recv++); net_stats.ipv6.recv++; } -static inline void net_stats_update_ipv6_drop(void) +static inline void net_stats_update_ipv6_drop(struct net_if *iface) { + SET_STAT(iface->stats.ipv6.drop++); net_stats.ipv6.drop++; } #else -#define net_stats_update_ipv6_drop() -#define net_stats_update_ipv6_sent() -#define net_stats_update_ipv6_recv() +#define net_stats_update_ipv6_drop(iface) +#define net_stats_update_ipv6_sent(iface) +#define net_stats_update_ipv6_recv(iface) #endif /* CONFIG_NET_STATISTICS_IPV6 */ #if defined(CONFIG_NET_STATISTICS_IPV6_ND) /* IPv6 Neighbor Discovery stats*/ -static inline void net_stats_update_ipv6_nd_sent(void) +static inline void net_stats_update_ipv6_nd_sent(struct net_if *iface) { + SET_STAT(iface->stats.ipv6_nd.sent++); net_stats.ipv6_nd.sent++; } -static inline void net_stats_update_ipv6_nd_recv(void) +static inline void net_stats_update_ipv6_nd_recv(struct net_if *iface) { + SET_STAT(iface->stats.ipv6_nd.recv++); net_stats.ipv6_nd.recv++; } -static inline void net_stats_update_ipv6_nd_drop(void) +static inline void net_stats_update_ipv6_nd_drop(struct net_if *iface) { + SET_STAT(iface->stats.ipv6_nd.drop++); net_stats.ipv6_nd.drop++; } #else -#define net_stats_update_ipv6_nd_sent() -#define net_stats_update_ipv6_nd_recv() -#define net_stats_update_ipv6_nd_drop() 
+#define net_stats_update_ipv6_nd_sent(iface) +#define net_stats_update_ipv6_nd_recv(iface) +#define net_stats_update_ipv6_nd_drop(iface) #endif /* CONFIG_NET_STATISTICS_IPV6_ND */ #if defined(CONFIG_NET_STATISTICS_IPV4) /* IPv4 stats */ -static inline void net_stats_update_ipv4_drop(void) +static inline void net_stats_update_ipv4_drop(struct net_if *iface) { + SET_STAT(iface->stats.ipv4.drop++); net_stats.ipv4.drop++; } -static inline void net_stats_update_ipv4_sent(void) +static inline void net_stats_update_ipv4_sent(struct net_if *iface) { + SET_STAT(iface->stats.ipv4.sent++); net_stats.ipv4.sent++; } -static inline void net_stats_update_ipv4_recv(void) +static inline void net_stats_update_ipv4_recv(struct net_if *iface) { + SET_STAT(iface->stats.ipv4.recv++); net_stats.ipv4.recv++; } #else -#define net_stats_update_ipv4_drop() -#define net_stats_update_ipv4_sent() -#define net_stats_update_ipv4_recv() +#define net_stats_update_ipv4_drop(iface) +#define net_stats_update_ipv4_sent(iface) +#define net_stats_update_ipv4_recv(iface) #endif /* CONFIG_NET_STATISTICS_IPV4 */ #if defined(CONFIG_NET_STATISTICS_ICMP) /* Common ICMPv4/ICMPv6 stats */ -static inline void net_stats_update_icmp_sent(void) +static inline void net_stats_update_icmp_sent(struct net_if *iface) { + SET_STAT(iface->stats.icmp.sent++); net_stats.icmp.sent++; } -static inline void net_stats_update_icmp_recv(void) +static inline void net_stats_update_icmp_recv(struct net_if *iface) { + SET_STAT(iface->stats.icmp.recv++); net_stats.icmp.recv++; } -static inline void net_stats_update_icmp_drop(void) +static inline void net_stats_update_icmp_drop(struct net_if *iface) { + SET_STAT(iface->stats.icmp.drop++); net_stats.icmp.drop++; } #else -#define net_stats_update_icmp_sent() -#define net_stats_update_icmp_recv() -#define net_stats_update_icmp_drop() +#define net_stats_update_icmp_sent(iface) +#define net_stats_update_icmp_recv(iface) +#define net_stats_update_icmp_drop(iface) #endif /* 
CONFIG_NET_STATISTICS_ICMP */ #if defined(CONFIG_NET_STATISTICS_UDP) /* UDP stats */ -static inline void net_stats_update_udp_sent(void) +static inline void net_stats_update_udp_sent(struct net_if *iface) { + SET_STAT(iface->stats.udp.sent++); net_stats.udp.sent++; } -static inline void net_stats_update_udp_recv(void) +static inline void net_stats_update_udp_recv(struct net_if *iface) { + SET_STAT(iface->stats.udp.recv++); net_stats.udp.recv++; } -static inline void net_stats_update_udp_drop(void) +static inline void net_stats_update_udp_drop(struct net_if *iface) { + SET_STAT(iface->stats.udp.drop++); net_stats.udp.drop++; } -static inline void net_stats_update_udp_chkerr(void) +static inline void net_stats_update_udp_chkerr(struct net_if *iface) { + SET_STAT(iface->stats.udp.chkerr++); net_stats.udp.chkerr++; } #else -#define net_stats_update_udp_sent() -#define net_stats_update_udp_recv() -#define net_stats_update_udp_drop() -#define net_stats_update_udp_chkerr() +#define net_stats_update_udp_sent(iface) +#define net_stats_update_udp_recv(iface) +#define net_stats_update_udp_drop(iface) +#define net_stats_update_udp_chkerr(iface) #endif /* CONFIG_NET_STATISTICS_UDP */ #if defined(CONFIG_NET_STATISTICS_TCP) /* TCP stats */ -static inline void net_stats_update_tcp_sent(u32_t bytes) +static inline void net_stats_update_tcp_sent(struct net_if *iface, u32_t bytes) { + SET_STAT(iface->stats.tcp.bytes.sent += bytes); net_stats.tcp.bytes.sent += bytes; } -static inline void net_stats_update_tcp_recv(u32_t bytes) +static inline void net_stats_update_tcp_recv(struct net_if *iface, u32_t bytes) { + SET_STAT(iface->stats.tcp.bytes.received += bytes); net_stats.tcp.bytes.received += bytes; } -static inline void net_stats_update_tcp_resent(u32_t bytes) +static inline void net_stats_update_tcp_resent(struct net_if *iface, + u32_t bytes) { + SET_STAT(iface->stats.tcp.resent += bytes); net_stats.tcp.resent += bytes; } -static inline void net_stats_update_tcp_seg_sent(void) 
+static inline void net_stats_update_tcp_seg_sent(struct net_if *iface) { + SET_STAT(iface->stats.tcp.sent++); net_stats.tcp.sent++; } -static inline void net_stats_update_tcp_seg_recv(void) +static inline void net_stats_update_tcp_seg_recv(struct net_if *iface) { + SET_STAT(iface->stats.tcp.recv++); net_stats.tcp.recv++; } -static inline void net_stats_update_tcp_seg_drop(void) +static inline void net_stats_update_tcp_seg_drop(struct net_if *iface) { + SET_STAT(iface->stats.tcp.drop++); net_stats.tcp.drop++; } -static inline void net_stats_update_tcp_seg_rst(void) +static inline void net_stats_update_tcp_seg_rst(struct net_if *iface) { + SET_STAT(iface->stats.tcp.rst++); net_stats.tcp.rst++; } -static inline void net_stats_update_tcp_seg_conndrop(void) +static inline void net_stats_update_tcp_seg_conndrop(struct net_if *iface) { + SET_STAT(iface->stats.tcp.conndrop++); net_stats.tcp.conndrop++; } -static inline void net_stats_update_tcp_seg_connrst(void) +static inline void net_stats_update_tcp_seg_connrst(struct net_if *iface) { + SET_STAT(iface->stats.tcp.connrst++); net_stats.tcp.connrst++; } -static inline void net_stats_update_tcp_seg_chkerr(void) +static inline void net_stats_update_tcp_seg_chkerr(struct net_if *iface) { + SET_STAT(iface->stats.tcp.chkerr++); net_stats.tcp.chkerr++; } -static inline void net_stats_update_tcp_seg_ackerr(void) +static inline void net_stats_update_tcp_seg_ackerr(struct net_if *iface) { + SET_STAT(iface->stats.tcp.ackerr++); net_stats.tcp.ackerr++; } -static inline void net_stats_update_tcp_seg_rsterr(void) +static inline void net_stats_update_tcp_seg_rsterr(struct net_if *iface) { + SET_STAT(iface->stats.tcp.rsterr++); net_stats.tcp.rsterr++; } -static inline void net_stats_update_tcp_seg_rexmit(void) +static inline void net_stats_update_tcp_seg_rexmit(struct net_if *iface) { + SET_STAT(iface->stats.tcp.rexmit++); net_stats.tcp.rexmit++; } #else -#define net_stats_update_tcp_sent(...) -#define net_stats_update_tcp_resent(...) 
-#define net_stats_update_tcp_recv(...) -#define net_stats_update_tcp_seg_sent() -#define net_stats_update_tcp_seg_recv() -#define net_stats_update_tcp_seg_drop() -#define net_stats_update_tcp_seg_rst() -#define net_stats_update_tcp_seg_conndrop() -#define net_stats_update_tcp_seg_connrst() -#define net_stats_update_tcp_seg_chkerr() -#define net_stats_update_tcp_seg_ackerr() -#define net_stats_update_tcp_seg_rsterr() -#define net_stats_update_tcp_seg_rexmit() +#define net_stats_update_tcp_sent(iface, bytes) +#define net_stats_update_tcp_resent(iface, bytes) +#define net_stats_update_tcp_recv(iface, bytes) +#define net_stats_update_tcp_seg_sent(iface) +#define net_stats_update_tcp_seg_recv(iface) +#define net_stats_update_tcp_seg_drop(iface) +#define net_stats_update_tcp_seg_rst(iface) +#define net_stats_update_tcp_seg_conndrop(iface) +#define net_stats_update_tcp_seg_connrst(iface) +#define net_stats_update_tcp_seg_chkerr(iface) +#define net_stats_update_tcp_seg_ackerr(iface) +#define net_stats_update_tcp_seg_rsterr(iface) +#define net_stats_update_tcp_seg_rexmit(iface) #endif /* CONFIG_NET_STATISTICS_TCP */ -static inline void net_stats_update_per_proto_recv(enum net_ip_protocol proto) +static inline void net_stats_update_per_proto_recv(struct net_if *iface, + enum net_ip_protocol proto) { if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) { - net_stats_update_udp_recv(); + net_stats_update_udp_recv(iface); } else if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) { - net_stats_update_tcp_seg_recv(); + net_stats_update_tcp_seg_recv(iface); } } -static inline void net_stats_update_per_proto_drop(enum net_ip_protocol proto) +static inline void net_stats_update_per_proto_drop(struct net_if *iface, + enum net_ip_protocol proto) { if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) { - net_stats_update_udp_drop(); + net_stats_update_udp_drop(iface); } else if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) { - net_stats_update_tcp_seg_drop(); + 
net_stats_update_tcp_seg_drop(iface); } } #if defined(CONFIG_NET_STATISTICS_RPL) /* RPL stats */ -static inline void net_stats_update_rpl_resets(void) +static inline void net_stats_update_rpl_resets(struct net_if *iface) { + SET_STAT(iface->stats.rpl.resets++); net_stats.rpl.resets++; } -static inline void net_stats_update_rpl_mem_overflows(void) +static inline void net_stats_update_rpl_mem_overflows(struct net_if *iface) { + SET_STAT(iface->stats.rpl.mem_overflows++); net_stats.rpl.mem_overflows++; } -static inline void net_stats_update_rpl_parent_switch(void) +static inline void net_stats_update_rpl_parent_switch(struct net_if *iface) { + SET_STAT(iface->stats.rpl.parent_switch++); net_stats.rpl.parent_switch++; } -static inline void net_stats_update_rpl_local_repairs(void) +static inline void net_stats_update_rpl_local_repairs(struct net_if *iface) { + SET_STAT(iface->stats.rpl.local_repairs++); net_stats.rpl.local_repairs++; } -static inline void net_stats_update_rpl_global_repairs(void) +static inline void net_stats_update_rpl_global_repairs(struct net_if *iface) { + SET_STAT(iface->stats.rpl.global_repairs++); net_stats.rpl.global_repairs++; } -static inline void net_stats_update_rpl_root_repairs(void) +static inline void net_stats_update_rpl_root_repairs(struct net_if *iface) { + SET_STAT(iface->stats.rpl.root_repairs++); net_stats.rpl.root_repairs++; } -static inline void net_stats_update_rpl_malformed_msgs(void) +static inline void net_stats_update_rpl_malformed_msgs(struct net_if *iface) { + SET_STAT(iface->stats.rpl.malformed_msgs++); net_stats.rpl.malformed_msgs++; } -static inline void net_stats_update_rpl_forward_errors(void) +static inline void net_stats_update_rpl_forward_errors(struct net_if *iface) { + SET_STAT(iface->stats.rpl.forward_errors++); net_stats.rpl.forward_errors++; } -static inline void net_stats_update_rpl_loop_errors(void) +static inline void net_stats_update_rpl_loop_errors(struct net_if *iface) { + 
SET_STAT(iface->stats.rpl.loop_errors++); net_stats.rpl.loop_errors++; } -static inline void net_stats_update_rpl_loop_warnings(void) +static inline void net_stats_update_rpl_loop_warnings(struct net_if *iface) { + SET_STAT(iface->stats.rpl.loop_warnings++); net_stats.rpl.loop_warnings++; } -static inline void net_stats_update_rpl_dis_sent(void) +static inline void net_stats_update_rpl_dis_sent(struct net_if *iface) { + SET_STAT(iface->stats.rpl.dis.sent++); net_stats.rpl.dis.sent++; } -static inline void net_stats_update_rpl_dio_sent(void) +static inline void net_stats_update_rpl_dio_sent(struct net_if *iface) { + SET_STAT(iface->stats.rpl.dio.sent++); net_stats.rpl.dio.sent++; } -static inline void net_stats_update_rpl_dao_sent(void) +static inline void net_stats_update_rpl_dao_sent(struct net_if *iface) { + SET_STAT(iface->stats.rpl.dao.sent++); net_stats.rpl.dao.sent++; } -static inline void net_stats_update_rpl_dao_forwarded(void) +static inline void net_stats_update_rpl_dao_forwarded(struct net_if *iface) { + SET_STAT(iface->stats.rpl.dao.forwarded++); net_stats.rpl.dao.forwarded++; } -static inline void net_stats_update_rpl_dao_ack_sent(void) +static inline void net_stats_update_rpl_dao_ack_sent(struct net_if *iface) { + SET_STAT(iface->stats.rpl.dao_ack.sent++); net_stats.rpl.dao_ack.sent++; } -static inline void net_stats_update_rpl_dao_ack_recv(void) +static inline void net_stats_update_rpl_dao_ack_recv(struct net_if *iface) { + SET_STAT(iface->stats.rpl.dao_ack.recv++); net_stats.rpl.dao_ack.recv++; } #else -#define net_stats_update_rpl_resets() -#define net_stats_update_rpl_mem_overflows() -#define net_stats_update_rpl_parent_switch() -#define net_stats_update_rpl_local_repairs() -#define net_stats_update_rpl_global_repairs() -#define net_stats_update_rpl_root_repairs() -#define net_stats_update_rpl_malformed_msgs() -#define net_stats_update_rpl_forward_errors() -#define net_stats_update_rpl_loop_errors() -#define net_stats_update_rpl_loop_warnings() 
-#define net_stats_update_rpl_dis_sent() -#define net_stats_update_rpl_dio_sent() -#define net_stats_update_rpl_dao_sent() -#define net_stats_update_rpl_dao_forwarded() -#define net_stats_update_rpl_dao_ack_sent() -#define net_stats_update_rpl_dao_ack_recv() +#define net_stats_update_rpl_resets(iface) +#define net_stats_update_rpl_mem_overflows(iface) +#define net_stats_update_rpl_parent_switch(iface) +#define net_stats_update_rpl_local_repairs(iface) +#define net_stats_update_rpl_global_repairs(iface) +#define net_stats_update_rpl_root_repairs(iface) +#define net_stats_update_rpl_malformed_msgs(iface) +#define net_stats_update_rpl_forward_errors(iface) +#define net_stats_update_rpl_loop_errors(iface) +#define net_stats_update_rpl_loop_warnings(iface) +#define net_stats_update_rpl_dis_sent(iface) +#define net_stats_update_rpl_dio_sent(iface) +#define net_stats_update_rpl_dao_sent(iface) +#define net_stats_update_rpl_dao_forwarded(iface) +#define net_stats_update_rpl_dao_ack_sent(iface) +#define net_stats_update_rpl_dao_ack_recv(iface) #endif /* CONFIG_NET_STATISTICS_RPL */ #if defined(CONFIG_NET_STATISTICS_MLD) -static inline void net_stats_update_ipv6_mld_recv(void) +static inline void net_stats_update_ipv6_mld_recv(struct net_if *iface) { + SET_STAT(iface->stats.ipv6_mld.recv++); net_stats.ipv6_mld.recv++; } -static inline void net_stats_update_ipv6_mld_sent(void) +static inline void net_stats_update_ipv6_mld_sent(struct net_if *iface) { + SET_STAT(iface->stats.ipv6_mld.sent++); net_stats.ipv6_mld.sent++; } -static inline void net_stats_update_ipv6_mld_drop(void) +static inline void net_stats_update_ipv6_mld_drop(struct net_if *iface) { + SET_STAT(iface->stats.ipv6_mld.drop++); net_stats.ipv6_mld.drop++; } #else -#define net_stats_update_ipv6_mld_recv() -#define net_stats_update_ipv6_mld_sent() -#define net_stats_update_ipv6_mld_drop() +#define net_stats_update_ipv6_mld_recv(iface) +#define net_stats_update_ipv6_mld_sent(iface) +#define 
net_stats_update_ipv6_mld_drop(iface) #endif /* CONFIG_NET_STATISTICS_MLD */ #if (NET_TC_COUNT > 1) && defined(CONFIG_NET_STATISTICS) -static inline void net_stats_update_tc_sent_pkt(u8_t tc) +static inline void net_stats_update_tc_sent_pkt(struct net_if *iface, u8_t tc) { + SET_STAT(iface->stats.tc.sent[tc].pkts++); net_stats.tc.sent[tc].pkts++; } -static inline void net_stats_update_tc_sent_bytes(u8_t tc, size_t bytes) +static inline void net_stats_update_tc_sent_bytes(struct net_if *iface, + u8_t tc, size_t bytes) { + SET_STAT(iface->stats.tc.sent[tc].bytes += bytes); net_stats.tc.sent[tc].bytes += bytes; } -static inline void net_stats_update_tc_sent_priority(u8_t tc, u8_t priority) +static inline void net_stats_update_tc_sent_priority(struct net_if *iface, + u8_t tc, u8_t priority) { + SET_STAT(iface->stats.tc.sent[tc].priority = priority); net_stats.tc.sent[tc].priority = priority; } -static inline void net_stats_update_tc_recv_pkt(u8_t tc) +static inline void net_stats_update_tc_recv_pkt(struct net_if *iface, u8_t tc) { + SET_STAT(iface->stats.tc.recv[tc].pkts++); net_stats.tc.recv[tc].pkts++; } -static inline void net_stats_update_tc_recv_bytes(u8_t tc, size_t bytes) +static inline void net_stats_update_tc_recv_bytes(struct net_if *iface, + u8_t tc, size_t bytes) { + SET_STAT(iface->stats.tc.recv[tc].bytes += bytes); net_stats.tc.recv[tc].bytes += bytes; } -static inline void net_stats_update_tc_recv_priority(u8_t tc, u8_t priority) +static inline void net_stats_update_tc_recv_priority(struct net_if *iface, + u8_t tc, u8_t priority) { + SET_STAT(iface->stats.tc.recv[tc].priority = priority); net_stats.tc.recv[tc].priority = priority; } #else -#define net_stats_update_tc_sent_pkt(tc) -#define net_stats_update_tc_sent_bytes(tc, bytes) -#define net_stats_update_tc_sent_priority(tc, priority) -#define net_stats_update_tc_recv_pkt(tc) -#define net_stats_update_tc_recv_bytes(tc, bytes) -#define net_stats_update_tc_recv_priority(tc, priority) +#define 
 net_stats_update_tc_sent_pkt(iface, tc) +#define net_stats_update_tc_sent_bytes(iface, tc, bytes) +#define net_stats_update_tc_sent_priority(iface, tc, priority) +#define net_stats_update_tc_recv_pkt(iface, tc) +#define net_stats_update_tc_recv_bytes(iface, tc, bytes) +#define net_stats_update_tc_recv_priority(iface, tc, priority) #endif /* NET_TC_COUNT > 1 */ +#if defined(CONFIG_NET_PKT_TIMESTAMP) && defined(CONFIG_NET_STATISTICS) +#define _NET_STATS_AVG_SAMPLES 100 + +static inline +void _net_stats_update_pkt_timestamp(struct net_stats_ts_data *data, + u32_t ts) +{ + if (ts == UINT32_MAX || ts == 0) { + return; + } + + /* Do not calculate highest or lowest number into rolling average */ + + if (ts < data->low || data->low == 0) { + data->low = ts; + return; + } + + if (ts > data->high) { + data->high = ts; + return; + } + + if (data->average) { + if (ts > (10 * data->average)) { + /* If the time is too large, just skip it */ + return; + } + + data->average = (data->average * + (_NET_STATS_AVG_SAMPLES - 1) + ts) / + _NET_STATS_AVG_SAMPLES; + } else { + data->average = ts; + } +} + +static inline void net_stats_update_pkt_tx_timestamp(u8_t tc, u32_t ts) +{ + _net_stats_update_pkt_timestamp(&net_stats.ts.tx[tc].time, ts); +} + +static inline void net_stats_update_pkt_rx_timestamp(u8_t tc, u32_t ts) +{ + _net_stats_update_pkt_timestamp(&net_stats.ts.rx[tc].time, ts); +} +#else +#define net_stats_update_pkt_tx_timestamp(tc, ts) +#define net_stats_update_pkt_rx_timestamp(tc, ts) +#endif /* CONFIG_NET_PKT_TIMESTAMP */ + #if defined(CONFIG_NET_STATISTICS_PERIODIC_OUTPUT) /* A simple periodic statistic printer, used only in net core */ +void net_print_statistics_all(void); +void net_print_statistics_iface(struct net_if *iface); void net_print_statistics(void); #else +#define net_print_statistics_all() +#define net_print_statistics_iface(iface) #define net_print_statistics() #endif diff --git a/subsys/net/ip/net_tc.c b/subsys/net/ip/net_tc.c index 0aa3d945af691..c5a54300fffa5 
100644 --- a/subsys/net/ip/net_tc.c +++ b/subsys/net/ip/net_tc.c @@ -270,23 +270,41 @@ static int rx_tc2thread(int tc) /* Fixup the traffic class statistics so that "net stats" shell command will * print output correctly. */ -static void tc_tx_stats_priority_setup(void) +static void tc_tx_stats_priority_setup(struct net_if *iface) { int i; for (i = 0; i < 8; i++) { - net_stats_update_tc_sent_priority(net_tx_priority2tc(i), i); + net_stats_update_tc_sent_priority(iface, net_tx_priority2tc(i), + i); } } -static void tc_rx_stats_priority_setup(void) +static void tc_rx_stats_priority_setup(struct net_if *iface) { int i; for (i = 0; i < 8; i++) { - net_stats_update_tc_recv_priority(net_rx_priority2tc(i), i); + net_stats_update_tc_recv_priority(iface, net_rx_priority2tc(i), + i); } } + +static void net_tc_tx_stats_priority_setup(struct net_if *iface, + void *user_data) +{ + ARG_UNUSED(user_data); + + tc_tx_stats_priority_setup(iface); +} + +static void net_tc_rx_stats_priority_setup(struct net_if *iface, + void *user_data) +{ + ARG_UNUSED(user_data); + + tc_rx_stats_priority_setup(iface); +} #endif /* Create workqueue for each traffic class we are using. All the network @@ -300,7 +318,7 @@ void net_tc_tx_init(void) BUILD_ASSERT(NET_TC_TX_COUNT > 0); #if defined(CONFIG_NET_STATISTICS) - tc_tx_stats_priority_setup(); + net_if_foreach(net_tc_tx_stats_priority_setup, NULL); #endif for (i = 0; i < NET_TC_TX_COUNT; i++) { @@ -340,7 +358,7 @@ void net_tc_rx_init(void) BUILD_ASSERT(NET_TC_RX_COUNT > 0); #if defined(CONFIG_NET_STATISTICS) - tc_rx_stats_priority_setup(); + net_if_foreach(net_tc_rx_stats_priority_setup, NULL); #endif for (i = 0; i < NET_TC_RX_COUNT; i++) { diff --git a/subsys/net/ip/rpl.c b/subsys/net/ip/rpl.c index 6662bc187b097..dcca4091ab1a9 100644 --- a/subsys/net/ip/rpl.c +++ b/subsys/net/ip/rpl.c @@ -593,8 +593,8 @@ int net_rpl_dio_send(struct net_if *iface, dst ? 
"unicast" : "multicast", instance->current_dag->rank, iface); - net_stats_update_icmp_sent(); - net_stats_update_rpl_dio_sent(); + net_stats_update_icmp_sent(iface); + net_stats_update_rpl_dio_sent(iface); return 0; } @@ -710,7 +710,7 @@ static void net_rpl_dio_reset_timer(struct net_rpl_instance *instance) new_dio_interval(instance); } - net_stats_update_rpl_resets(); + net_stats_update_rpl_resets(instance->iface); } static inline void send_dis_all_interfaces(struct net_if *iface, @@ -1080,19 +1080,24 @@ static struct net_rpl_instance *net_rpl_alloc_instance(u8_t instance_id) return NULL; } -static struct net_rpl_dag *alloc_dag(u8_t instance_id, +static struct net_rpl_dag *alloc_dag(struct net_if *iface, + u8_t instance_id, struct in6_addr *dag_id) { struct net_rpl_instance *instance; struct net_rpl_dag *dag; int i; +#if !defined(CONFIG_NET_STATISTICS) + ARG_UNUSED(iface); +#endif + instance = net_rpl_get_instance(instance_id); if (!instance) { instance = net_rpl_alloc_instance(instance_id); if (!instance) { NET_ERR("Cannot allocate instance id %d", instance_id); - net_stats_update_rpl_mem_overflows(); + net_stats_update_rpl_mem_overflows(iface); return NULL; } @@ -1285,7 +1290,7 @@ static void net_rpl_reset_dio_timer(struct net_rpl_instance *instance) new_dio_interval(instance); } - net_stats_update_rpl_resets(); + net_stats_update_rpl_resets(instance->iface); } struct net_rpl_dag *net_rpl_set_root_with_version(struct net_if *iface, @@ -1324,7 +1329,7 @@ struct net_rpl_dag *net_rpl_set_root_with_version(struct net_if *iface, } } - dag = alloc_dag(instance_id, dag_id); + dag = alloc_dag(iface, instance_id, dag_id); if (!dag) { NET_DBG("Failed to allocate a DAG"); return NULL; @@ -1912,7 +1917,7 @@ struct net_rpl_dag *net_rpl_select_dag(struct net_if *iface, NET_DBG("Changed preferred parent, rank changed from %u to %u", old_rank, best_dag->rank); - net_stats_update_rpl_parent_switch(); + net_stats_update_rpl_parent_switch(iface); if (instance->mop != 
NET_RPL_MOP_NO_DOWNWARD_ROUTES) { if (last_parent) { @@ -1983,7 +1988,7 @@ static void net_rpl_local_repair(struct net_if *iface, net_rpl_reset_dio_timer(instance); - net_stats_update_rpl_local_repairs(); + net_stats_update_rpl_local_repairs(iface); } /* Return true if parent is kept, false if it is dropped */ @@ -2060,7 +2065,7 @@ bool net_rpl_repair_root(u8_t instance_id) return false; } - net_stats_update_rpl_root_repairs(); + net_stats_update_rpl_root_repairs(instance->iface); net_rpl_lollipop_increment(&instance->current_dag->version); net_rpl_lollipop_increment(&instance->dtsn); @@ -2136,7 +2141,7 @@ static void global_repair(struct net_if *iface, NET_DBG("Participating in a global repair version %d rank %d", dag->version, dag->rank); - net_stats_update_rpl_global_repairs(); + net_stats_update_rpl_global_repairs(iface); } #define net_rpl_print_parent_info(parent, instance) \ @@ -2849,7 +2854,7 @@ static enum net_verdict handle_dio(struct net_pkt *pkt) break; } else if (!frag && pos == 0xffff) { NET_DBG("Invalid DIO packet"); - net_stats_update_rpl_malformed_msgs(); + net_stats_update_rpl_malformed_msgs(iface); return NET_DROP; } @@ -2870,7 +2875,7 @@ static enum net_verdict handle_dio(struct net_pkt *pkt) case NET_RPL_OPTION_DAG_METRIC_CONTAINER: if (len < 6) { NET_DBG("Invalid DAG MC len %d", len); - net_stats_update_rpl_malformed_msgs(); + net_stats_update_rpl_malformed_msgs(iface); goto out; } @@ -2904,7 +2909,7 @@ static enum net_verdict handle_dio(struct net_pkt *pkt) if (len < 9) { NET_DBG("Invalid destination prefix " "option len %d", len); - net_stats_update_rpl_malformed_msgs(); + net_stats_update_rpl_malformed_msgs(iface); goto out; } @@ -2923,7 +2928,7 @@ static enum net_verdict handle_dio(struct net_pkt *pkt) } else { NET_DBG("Invalid route info option len %d", len); - net_stats_update_rpl_malformed_msgs(); + net_stats_update_rpl_malformed_msgs(iface); goto out; } @@ -2932,7 +2937,7 @@ static enum net_verdict handle_dio(struct net_pkt *pkt) if 
(len != 16) { NET_DBG("Invalid DAG configuration option " "len %d", len); - net_stats_update_rpl_malformed_msgs(); + net_stats_update_rpl_malformed_msgs(iface); goto out; } @@ -2965,7 +2970,7 @@ static enum net_verdict handle_dio(struct net_pkt *pkt) if (len != 32) { NET_DBG("Invalid DAG prefix info len %d != 32", len); - net_stats_update_rpl_malformed_msgs(); + net_stats_update_rpl_malformed_msgs(iface); goto out; } @@ -3113,8 +3118,8 @@ int net_rpl_dao_send(struct net_if *iface, if (ret >= 0) { net_rpl_dao_info(pkt, src, dst, prefix); - net_stats_update_icmp_sent(); - net_stats_update_rpl_dao_sent(); + net_stats_update_icmp_sent(iface); + net_stats_update_rpl_dao_sent(iface); } else { net_pkt_unref(pkt); } @@ -3166,8 +3171,8 @@ static inline int dao_forward(struct net_if *iface, ret = net_send_data(pkt); if (ret >= 0) { - net_stats_update_icmp_sent(); - net_stats_update_rpl_dao_forwarded(); + net_stats_update_icmp_sent(iface); + net_stats_update_rpl_dao_forwarded(iface); } else { net_pkt_unref(pkt); } @@ -3216,8 +3221,8 @@ static int dao_ack_send(struct in6_addr *src, net_rpl_dao_ack_info(pkt, src, dst, instance->instance_id, sequence); - net_stats_update_icmp_sent(); - net_stats_update_rpl_dao_ack_sent(); + net_stats_update_icmp_sent(iface); + net_stats_update_rpl_dao_ack_sent(iface); } else { net_pkt_unref(pkt); } @@ -3393,7 +3398,7 @@ static enum net_verdict handle_dao(struct net_pkt *pkt) } else if (!frag && pos == 0xffff) { /* Read error */ NET_DBG("Invalid DAO packet"); - net_stats_update_rpl_malformed_msgs(); + net_stats_update_rpl_malformed_msgs(iface); return NET_DROP; } @@ -3560,7 +3565,7 @@ static enum net_verdict handle_dao(struct net_pkt *pkt) route = net_rpl_add_route(dag, net_pkt_iface(pkt), &addr, target_len, dao_sender); if (!route) { - net_stats_update_rpl_mem_overflows(); + net_stats_update_rpl_mem_overflows(iface); NET_DBG("Could not add a route after receiving a DAO"); return NET_DROP; @@ -3676,7 +3681,7 @@ static enum net_verdict 
handle_dao_ack(struct net_pkt *pkt) return NET_DROP; } - net_stats_update_rpl_dao_ack_recv(); + net_stats_update_rpl_dao_ack_recv(net_pkt_iface(pkt)); net_pkt_unref(pkt); @@ -3825,7 +3830,7 @@ struct net_buf *net_rpl_verify_header(struct net_pkt *pkt, struct net_buf *frag, net_route_del(route); } - net_stats_update_rpl_forward_errors(); + net_stats_update_rpl_forward_errors(net_pkt_iface(pkt)); /* Trigger DAO retransmission */ net_rpl_reset_dio_timer(instance); @@ -3860,7 +3865,7 @@ struct net_buf *net_rpl_verify_header(struct net_pkt *pkt, struct net_buf *frag, sender_closer); if (flags & NET_RPL_HDR_OPT_RANK_ERR) { - net_stats_update_rpl_loop_errors(); + net_stats_update_rpl_loop_errors(net_pkt_iface(pkt)); NET_DBG("Rank error signalled in RPL option!"); @@ -3874,7 +3879,7 @@ struct net_buf *net_rpl_verify_header(struct net_pkt *pkt, struct net_buf *frag, } NET_DBG("Single error tolerated."); - net_stats_update_rpl_loop_warnings(); + net_stats_update_rpl_loop_warnings(net_pkt_iface(pkt)); /* FIXME: Handle (NET_RPL_HDR_OPT_RANK_ERR) errors properly */ *result = true; diff --git a/subsys/net/ip/tcp.c b/subsys/net/ip/tcp.c index 6d1bcfc920cd7..d50d836149f2c 100644 --- a/subsys/net/ip/tcp.c +++ b/subsys/net/ip/tcp.c @@ -200,7 +200,8 @@ static void tcp_retry_expired(struct k_work *work) tcp->retry_timeout_shift, tcp, pkt); if (IS_ENABLED(CONFIG_NET_STATISTICS_TCP) && !is_6lo_technology(pkt)) { - net_stats_update_tcp_seg_rexmit(); + net_stats_update_tcp_seg_rexmit( + net_pkt_iface(pkt)); } } } else if (IS_ENABLED(CONFIG_NET_TCP_TIME_WAIT)) { @@ -774,7 +775,7 @@ int net_tcp_queue_data(struct net_context *context, struct net_pkt *pkt) context->tcp->send_seq += data_len; - net_stats_update_tcp_sent(data_len); + net_stats_update_tcp_sent(net_pkt_iface(pkt), data_len); sys_slist_append(&context->tcp->sent_list, &pkt->sent_list); @@ -872,7 +873,8 @@ int net_tcp_send_pkt(struct net_pkt *pkt) if (ret < 0) { net_pkt_unref(new_pkt); } else { - net_stats_update_tcp_seg_rexmit(); 
+ net_stats_update_tcp_seg_rexmit( + net_pkt_iface(pkt)); } return ret; @@ -948,7 +950,7 @@ bool net_tcp_ack_received(struct net_context *ctx, u32_t ack) if (net_tcp_seq_greater(ack, ctx->tcp->send_seq)) { NET_ERR("ctx %p: ACK for unsent data", ctx); - net_stats_update_tcp_seg_ackerr(); + net_stats_update_tcp_seg_ackerr(net_context_get_iface(ctx)); /* RFC 793 doesn't say that invalid ack sequence is an error * in the general case, but we implement tighter checking, * and consider entire packet invalid. diff --git a/subsys/net/lib/app/client.c b/subsys/net/lib/app/client.c index 67f609eec2d91..3f9719a92008c 100644 --- a/subsys/net/lib/app/client.c +++ b/subsys/net/lib/app/client.c @@ -292,7 +292,7 @@ static int bind_local(struct net_app_ctx *ctx) #if defined(CONFIG_NET_IPV4) if (ctx->ipv4.remote.sa_family == AF_INET && ctx->ipv4.ctx) { ctx->ipv4.local.sa_family = AF_INET; - _net_app_set_local_addr(&ctx->ipv4.local, NULL, + _net_app_set_local_addr(ctx, &ctx->ipv4.local, NULL, net_sin(&ctx->ipv4.local)->sin_port); ret = _net_app_set_net_ctx(ctx, ctx->ipv4.ctx, @@ -309,7 +309,7 @@ static int bind_local(struct net_app_ctx *ctx) #if defined(CONFIG_NET_IPV6) if (ctx->ipv6.remote.sa_family == AF_INET6 && ctx->ipv6.ctx) { ctx->ipv6.local.sa_family = AF_INET6; - _net_app_set_local_addr(&ctx->ipv6.local, NULL, + _net_app_set_local_addr(ctx, &ctx->ipv6.local, NULL, net_sin6(&ctx->ipv6.local)->sin6_port); ret = _net_app_set_net_ctx(ctx, ctx->ipv6.ctx, diff --git a/subsys/net/lib/app/net_app.c b/subsys/net/lib/app/net_app.c index 3ee0c86dbf1fe..224b5eb836acd 100644 --- a/subsys/net/lib/app/net_app.c +++ b/subsys/net/lib/app/net_app.c @@ -265,8 +265,8 @@ int _net_app_set_net_ctx(struct net_app_ctx *ctx, return ret; } -int _net_app_set_local_addr(struct sockaddr *addr, const char *myaddr, - u16_t port) +int _net_app_set_local_addr(struct net_app_ctx *ctx, struct sockaddr *addr, + const char *myaddr, u16_t port) { if (myaddr) { void *inaddr; @@ -299,14 +299,15 @@ int 
_net_app_set_local_addr(struct sockaddr *addr, const char *myaddr, #if defined(CONFIG_NET_IPV6) net_ipaddr_copy(&net_sin6(addr)->sin6_addr, net_if_ipv6_select_src_addr(NULL, - (struct in6_addr *) - net_ipv6_unspecified_address())); + &net_sin6(&ctx->ipv6.remote)->sin6_addr)); #else return -EPFNOSUPPORT; #endif } else if (addr->sa_family == AF_INET) { #if defined(CONFIG_NET_IPV4) - struct net_if *iface = net_if_get_default(); + struct net_if *iface = + net_if_ipv4_select_src_iface( + &net_sin(&ctx->ipv4.remote)->sin_addr); NET_ASSERT(iface->config.ip.ipv4); diff --git a/subsys/net/lib/app/net_app_private.h b/subsys/net/lib/app/net_app_private.h index 02d89f2205c71..8dde22aa32b8b 100644 --- a/subsys/net/lib/app/net_app_private.h +++ b/subsys/net/lib/app/net_app_private.h @@ -50,8 +50,8 @@ void _net_app_received(struct net_context *net_ctx, struct net_pkt *pkt, int status, void *user_data); -int _net_app_set_local_addr(struct sockaddr *addr, const char *myaddr, - u16_t port); +int _net_app_set_local_addr(struct net_app_ctx *ctx, struct sockaddr *addr, + const char *myaddr, u16_t port); int _net_app_set_net_ctx(struct net_app_ctx *ctx, struct net_context *net_ctx, struct sockaddr *addr, diff --git a/subsys/net/lib/app/server.c b/subsys/net/lib/app/server.c index 35d87db3df9b5..e1eea42e93126 100644 --- a/subsys/net/lib/app/server.c +++ b/subsys/net/lib/app/server.c @@ -137,7 +137,7 @@ int net_app_listen(struct net_app_ctx *ctx) ctx->ipv4.local.sa_family = AF_INET; dual = true; - _net_app_set_local_addr(&ctx->ipv4.local, NULL, + _net_app_set_local_addr(ctx, &ctx->ipv4.local, NULL, net_sin(&ctx->ipv4.local)->sin_port); } @@ -167,7 +167,7 @@ int net_app_listen(struct net_app_ctx *ctx) if (ctx->ipv6.local.sa_family == AF_UNSPEC || dual) { ctx->ipv6.local.sa_family = AF_INET6; - _net_app_set_local_addr(&ctx->ipv6.local, NULL, + _net_app_set_local_addr(ctx, &ctx->ipv6.local, NULL, net_sin6(&ctx->ipv6.local)->sin6_port); } diff --git a/subsys/usb/class/netusb/netusb.c 
b/subsys/usb/class/netusb/netusb.c index 3c4d97b6c3db0..9063c6767090a 100644 --- a/subsys/usb/class/netusb/netusb.c +++ b/subsys/usb/class/netusb/netusb.c @@ -24,6 +24,8 @@ #include #include +#include + #include "../../usb_descriptor.h" #include "../../composite.h" #include "netusb.h" diff --git a/tests/net/all/prj.conf b/tests/net/all/prj.conf index 6cf1293f96a96..7b45e54b3233d 100644 --- a/tests/net/all/prj.conf +++ b/tests/net/all/prj.conf @@ -229,3 +229,6 @@ CONFIG_HTTP_HEADER_FIELD_ITEMS=2 CONFIG_HTTP_CLIENT=y CONFIG_HTTP_PARSER=y CONFIG_HTTP_PARSER_STRICT=y + +# VLAN +CONFIG_NET_VLAN=y diff --git a/tests/net/automatic_testing/CMakeLists.txt b/tests/net/automatic_testing/CMakeLists.txt new file mode 100644 index 0000000000000..f44532dcf5175 --- /dev/null +++ b/tests/net/automatic_testing/CMakeLists.txt @@ -0,0 +1,10 @@ +set(KCONFIG_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/Kconfig) + +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(NONE) + +target_sources(app PRIVATE src/main.c) +target_sources(app PRIVATE src/echo-server.c) +target_sources_ifdef(CONFIG_NET_TCP app PRIVATE src/tcp.c) +target_sources_ifdef(CONFIG_NET_UDP app PRIVATE src/udp.c) +target_sources_ifdef(CONFIG_NET_VLAN app PRIVATE src/vlan.c) diff --git a/tests/net/automatic_testing/Kconfig b/tests/net/automatic_testing/Kconfig new file mode 100644 index 0000000000000..a5b6287e29f54 --- /dev/null +++ b/tests/net/automatic_testing/Kconfig @@ -0,0 +1,84 @@ +# Kconfig - Private config options for network testing app + +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +mainmenu "Networking test application" + +config ZEPHYR_BASE + string + option env="ZEPHYR_BASE" + +source "$ZEPHYR_BASE/Kconfig.zephyr" + +config NET_DEBUG_TEST_APP + bool "Enable debugging for this test application" + default n + depends on NET_LOG + help + Enable debugging output from this test application. + The used log level is the CONFIG_SYS_LOG_NET_LEVEL option. 
+ +config SAMPLE_VLAN_TAG_1 + int "Virtual lan tag used in this app" + default 100 + range 0 4094 + depends on NET_VLAN + help + Set virtual lan tag (id) that is used in this application. + +config SAMPLE_VLAN_TAG_2 + int "Second VLAN tag used in this app" + default 200 + range 0 4094 + depends on NET_VLAN + help + Set virtual lan tag (id) that is used in this application. + +config SAMPLE_IPV6_ADDR_1 + string "My IPv6 address for a network interface" + help + The value depends on your network setup. + +config SAMPLE_IPV4_ADDR_1 + string "My IPv4 address for a network interface" + help + The value depends on your network setup. + +config SAMPLE_IPV4_GW_1 + string "My IPv4 gateway for a network interface" + help + The value depends on your network setup. + +config SAMPLE_IPV4_NETMASK_1 + string "My IPv4 netmask for a network interface" + default "255.255.255.0" + help + Static netmask to use if not overridden by DHCP. Use empty value to + skip setting static value. + +config SAMPLE_IPV6_ADDR_2 + string "My IPv6 address for another network interface" + help + The value depends on your network setup. + +config SAMPLE_IPV4_ADDR_2 + string "My IPv4 address for another network interface" + help + The value depends on your network setup. + +config SAMPLE_IPV4_GW_2 + string "My IPv4 gateway for another network interface" + help + The value depends on your network setup. + +config SAMPLE_IPV4_NETMASK_2 + string "My IPv4 netmask for another network interface" + default "255.255.255.0" + help + Static netmask to use if not overridden by DHCP. Use empty value to + skip setting static value. + diff --git a/tests/net/automatic_testing/README.rst b/tests/net/automatic_testing/README.rst new file mode 100644 index 0000000000000..5883e190dca8a --- /dev/null +++ b/tests/net/automatic_testing/README.rst @@ -0,0 +1,88 @@ +.. 
_network-automatic-testing: + +Network Automatic Testing +######################### + +Overview +******** + +This test application for Zephyr will setup two virtual LAN networks +and provides echo-server service for normal and encrypted UDP and +TCP connections. The test application also enables net-shell. + +The source code for this test application can be found at: +:file:`tests/net/automatic_testing`. + +Requirements +************ + +- :ref:`networking_with_qemu` + +Building and Running +******************** + +Normally this test application is launched by automatic test system. +It is also possible to run this testing application with QEMU as described in +:ref:`networking_with_qemu`, with native-posix board or with real hardware. +Note that VLAN is only supported for boards that have ethernet port. + +Follow these steps to build the testing application: + +.. zephyr-app-commands:: + :zephyr-app: tests/net/automatic_testing + :board: + :conf: prj.conf + :goals: build + :compact: + +If this application is run in native_posix board, then normally one needs extra +priviliges to create and configure the TAP device in the host system. +Use sudo command to execute the Zephyr process with admin privileges, like this: + +.. code-block:: console + + sudo --preserve-env=ZEPHYR_BASE make run + +If the sudo command reports an error, then try to execute it like this: + +.. code-block:: console + + sudo --preserve-env make run + +The default configuration file prj.conf creates two virtual LAN networks +with these settings and one normal ethernet interface: + +- VLAN tag 100: IPv4 198.51.100.1 and IPv6 2001:db8:100::1 +- VLAN tag 200: IPv4 203.0.113.1 and IPv6 2001:db8:200::1 +- IPv4 192.0.2.1 and IPv6 2001:db8::1 + +Setting up Linux Host +===================== + +The :file:`samples/net/vlan/vlan-setup-linux.sh` provides a script that can be +executed on the Linux host. It creates two VLAN on the Linux host and +suitable IP routes to Zephyr. 
This script is not needed for the native-posix board
This way + # the driver added -i option is ignored if user has specified + # the -i option to host setup script command. + if [ -z "$IFACE" ]; then + IFACE="$2" + fi + shift + shift;; + *) + shift;; + esac +done + +if [ `id -u` != 0 ]; then + echo "Warning: This script will need admin rights to setup \ +network interface!" +fi + +if [ -z "$IFACE" ]; then + IFACE="zeth" +fi + +if [ -z "$CONF_FILE" ]; then + DIR=`dirname $0` + CONF_FILE="$DIR/net_setup_host.conf" +fi + +if [ -f "$CONF_FILE" ]; then + . $CONF_FILE +else + echo "Warning: config file $CONF_FILE does not exist!" +fi + +ip link set dev $IFACE up + +if [ ! -z "$HWADDR" ]; then + ip link set dev $IFACE address $HWADDR +fi + +if [ ! -z "$IPV6_ADDR_1" ]; then + ip -6 address add $IPV6_ADDR_1 dev $IFACE +fi + +if [ ! -z "$IPV6_ROUTE_1" ]; then + ip -6 route add $IPV6_ROUTE_1 dev $IFACE +fi + +if [ ! -z "$IPV4_ADDR_1" ]; then + ip address add $IPV4_ADDR_1 dev $IFACE +fi + +if [ ! -z "$IPV4_ROUTE_1" ]; then + ip route add $IPV4_ROUTE_1 dev $IFACE +fi + +if [ ! -z "$VLAN_NAME_PREFIX" ]; then + if [ ! -z "$VLAN_TAG_1" ]; then + ip link add link ${IFACE} name \ + ${VLAN_NAME_PREFIX}.${VLAN_TAG_1} type vlan id ${VLAN_TAG_1} + ip link set ${VLAN_NAME_PREFIX}.${VLAN_TAG_1} up + + if [ ! -z "$VLAN_ADDR_1_IPV6" ]; then + ip -6 addr add ${VLAN_ADDR_1_IPV6} \ + dev ${VLAN_NAME_PREFIX}.${VLAN_TAG_1} + fi + + if [ ! -z "$VLAN_ROUTE_1_IPV6" ]; then + ip -6 route add ${VLAN_ROUTE_1_IPV6} \ + dev ${VLAN_NAME_PREFIX}.${VLAN_TAG_1} + fi + + if [ ! -z "$VLAN_ADDR_1_IPV4" ]; then + ip addr add ${VLAN_ADDR_1_IPV4} \ + dev ${VLAN_NAME_PREFIX}.${VLAN_TAG_1} + fi + + if [ ! -z "$VLAN_ROUTE_1_IPV4" ]; then + ip route add ${VLAN_ROUTE_1_IPV4} \ + dev ${VLAN_NAME_PREFIX}.${VLAN_TAG_1} + fi + fi + + if [ ! -z "$VLAN_TAG_2" ]; then + ip link add link ${IFACE} name \ + ${VLAN_NAME_PREFIX}.${VLAN_TAG_2} type vlan id ${VLAN_TAG_2} + ip link set ${VLAN_NAME_PREFIX}.${VLAN_TAG_2} up + + if [ ! 
-z "$VLAN_ADDR_2_IPV6" ]; then + ip -6 addr add ${VLAN_ADDR_2_IPV6} \ + dev ${VLAN_NAME_PREFIX}.${VLAN_TAG_2} + fi + + if [ ! -z "$VLAN_ROUTE_2_IPV6" ]; then + ip -6 route add ${VLAN_ROUTE_2_IPV6} \ + dev ${VLAN_NAME_PREFIX}.${VLAN_TAG_2} + fi + + if [ ! -z "$VLAN_ADDR_2_IPV4" ]; then + ip addr add ${VLAN_ADDR_2_IPV4} \ + dev ${VLAN_NAME_PREFIX}.${VLAN_TAG_2} + fi + + if [ ! -z "$VLAN_ROUTE_2_IPV4" ]; then + ip route add ${VLAN_ROUTE_2_IPV4} \ + dev ${VLAN_NAME_PREFIX}.${VLAN_TAG_2} + fi + fi +fi diff --git a/tests/net/automatic_testing/net_setup_host.conf b/tests/net/automatic_testing/net_setup_host.conf new file mode 100644 index 0000000000000..403b3b4d0f671 --- /dev/null +++ b/tests/net/automatic_testing/net_setup_host.conf @@ -0,0 +1,26 @@ +# +# Configuration options for setting up the host network interface +# + +HWADDR="00:00:5e:00:53:99" + +IPV6_ADDR_1="2001:db8::2" +IPV6_ROUTE_1="2001:db8::/64" + +IPV4_ADDR_1="192.0.2.2" +IPV4_ROUTE_1="192.0.2.0/24" + +VLAN_NAME_PREFIX="vlan" +VLAN_TAG_1=100 +VLAN_TAG_2=200 + +VLAN_ADDR_1_IPV6="2001:db8:100::2" +VLAN_ROUTE_1_IPV6="2001:db8:100::/64" +VLAN_ADDR_2_IPV6="2001:db8:200::2" +VLAN_ROUTE_2_IPV6="2001:db8:200::/64" + +# Documentation addresses from RFC 5737 +VLAN_ADDR_1_IPV4="198.51.100.2" +VLAN_ROUTE_1_IPV4="198.51.100.0/24" +VLAN_ADDR_2_IPV4="203.0.113.2" +VLAN_ROUTE_2_IPV4="203.0.113.0/24" diff --git a/tests/net/automatic_testing/prj.conf b/tests/net/automatic_testing/prj.conf new file mode 100644 index 0000000000000..c5b23e965bfa9 --- /dev/null +++ b/tests/net/automatic_testing/prj.conf @@ -0,0 +1,114 @@ +# +# Configuration file for test application that can be used in networking +# automatic testing. 
+# + +# Generic kernel configuration +CONFIG_TEST_RANDOM_GENERATOR=y +CONFIG_ENTROPY_GENERATOR=y +CONFIG_INIT_STACKS=y +CONFIG_PRINTK=y + +# Network features to activate +CONFIG_NETWORKING=y +CONFIG_NET_IPV6=y +CONFIG_NET_IPV4=y +CONFIG_NET_DHCPV4=n +CONFIG_NET_UDP=y +CONFIG_NET_TCP=y +CONFIG_NET_STATISTICS=y +CONFIG_NET_SHELL=y + +# Logging +CONFIG_NET_LOG=y +CONFIG_SYS_LOG_SHOW_COLOR=y +CONFIG_SYS_LOG_NET_LEVEL=4 + +# Amount of network buffers +CONFIG_NET_PKT_RX_COUNT=64 +CONFIG_NET_PKT_TX_COUNT=64 +CONFIG_NET_BUF_RX_COUNT=64 +CONFIG_NET_BUF_TX_COUNT=64 + +# Max number of IP address addresses for each network interface +CONFIG_NET_IF_UNICAST_IPV6_ADDR_COUNT=5 +CONFIG_NET_IF_MCAST_IPV6_ADDR_COUNT=5 +CONFIG_NET_IF_UNICAST_IPV4_ADDR_COUNT=1 + +# Network context max count +CONFIG_NET_MAX_CONTEXTS=10 + +# Ethernet is needed for VLAN +CONFIG_NET_L2_ETHERNET=y + +# Echo-server uses net-app +CONFIG_NET_APP_SERVER=y +CONFIG_NET_APP_NEED_IPV6=y +CONFIG_NET_APP_NEED_IPV4=y +CONFIG_NET_APP_SETTINGS=y + +# First ethernet interface will use these settings +CONFIG_NET_APP_MY_IPV6_ADDR="2001:db8::1" +CONFIG_NET_APP_MY_IPV4_ADDR="192.0.2.1" +CONFIG_NET_APP_MY_IPV4_GW="192.0.2.2" + +# 2nd ethernet interface will use these settings +CONFIG_SAMPLE_IPV6_ADDR_1="2001:db8:100::1" +# TEST-NET-2 from RFC 5737 +CONFIG_SAMPLE_IPV4_ADDR_1="198.51.100.1" +CONFIG_SAMPLE_IPV4_GW_1="198.51.100.2" +# VLAN tag for the this interface +CONFIG_NET_VLAN_SAMPLE_TAG_1=100 + +# 3rd ethernet interface will use these settings +CONFIG_SAMPLE_IPV6_ADDR_2="2001:db8:200::1" +CONFIG_NET_VLAN_SAMPLE_TAG_2=200 +# TEST-NET-3 from RFC 5737 +CONFIG_SAMPLE_IPV4_ADDR_2="203.0.113.1" +CONFIG_SAMPLE_IPV4_GW_2="203.0.113.2" + +# Debugging options +CONFIG_NET_DEBUG_NET_PKT=y +CONFIG_NET_DEBUG_L2_ETHERNET=n +CONFIG_NET_DEBUG_ARP=n +CONFIG_NET_DEBUG_IPV4=n +CONFIG_NET_DEBUG_IPV6=n +CONFIG_NET_DEBUG_CORE=n +CONFIG_NET_DEBUG_IF=n +CONFIG_NET_DEBUG_TEST_APP=n + +# VLAN settings. 
Note that currently SLIP only supports one VLAN tag, +# and that is enforced by Kconfig file. This application will create +# three network interfaces, the first one without any VLAN tag and +# the other two with VLAN tags. +CONFIG_NET_VLAN=y +CONFIG_NET_VLAN_COUNT=3 + +# Full amount of traffic classes +CONFIG_NET_TC_COUNT=8 + +# Enable priority support in net_context +CONFIG_NET_CONTEXT_PRIORITY=y + +# Driver specific options + +# Use static link-layer address for SLIP, so that in host side +# the neighbor information do not need to be flushed. +CONFIG_SLIP_MAC_ADDR="00:00:5E:00:53:88" + +# Static link-layer address for FRDM-K64F board +CONFIG_ETH_MCUX_0_RANDOM_MAC=n +CONFIG_ETH_MCUX_0_MAC3=0x00 +CONFIG_ETH_MCUX_0_MAC3=0x53 +CONFIG_ETH_MCUX_0_MAC3=0x88 + +# Native-posix ethernet settings +CONFIG_ETH_NATIVE_POSIX=y +CONFIG_SYS_LOG_ETHERNET_LEVEL=1 +CONFIG_ETH_NATIVE_POSIX_RANDOM_MAC=n +CONFIG_ETH_NATIVE_POSIX_MAC_ADDR="00:00:5e:00:53:88" +CONFIG_ETH_NATIVE_POSIX_SETUP_SCRIPT="${ZEPHYR_BASE}/tests/net/automatic_testing/net_setup_host" + +# Use static link-layer address for SLIP, so that in host side +# the neighbor information do not need to be flushed. +CONFIG_SLIP_MAC_ADDR="00:00:5E:00:53:88" diff --git a/tests/net/automatic_testing/sample.yaml b/tests/net/automatic_testing/sample.yaml new file mode 100644 index 0000000000000..7cd6aee1d434c --- /dev/null +++ b/tests/net/automatic_testing/sample.yaml @@ -0,0 +1,10 @@ +common: + harness: net + tags: net net_automatic_testing vlan traffic_class +sample: + description: Test network functionality via automatic scripts + name: Network automatic testing app +tests: + test: + min_ram: 32 + depends_on: netif diff --git a/tests/net/automatic_testing/src/common.h b/tests/net/automatic_testing/src/common.h new file mode 100644 index 0000000000000..c956813596e53 --- /dev/null +++ b/tests/net/automatic_testing/src/common.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2017 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#define MY_PORT 4242 + +#define MAX_DBG_PRINT 64 + +struct interfaces { + struct net_if *non_vlan; + struct net_if *first_vlan; + struct net_if *second_vlan; +}; + +void start_udp(void); +void stop_udp(void); + +void start_tcp(void); +void stop_tcp(void); + +struct net_pkt *build_reply_pkt(const char *name, + struct net_app_ctx *ctx, + struct net_pkt *pkt); +void pkt_sent(struct net_app_ctx *ctx, int status, + void *token, void *user_data); +void panic(const char *msg); +void quit(void); + +int setup_vlan(struct interfaces *interfaces); +void setup_echo_server(void); +void cleanup_echo_server(void); diff --git a/tests/net/automatic_testing/src/echo-server.c b/tests/net/automatic_testing/src/echo-server.c new file mode 100644 index 0000000000000..470f5d18ed780 --- /dev/null +++ b/tests/net/automatic_testing/src/echo-server.c @@ -0,0 +1,143 @@ +/* echo.c - Networking echo server */ + +/* + * Copyright (c) 2018 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_TEST_APP) +#define SYS_LOG_DOMAIN "net-test/echo" +#define NET_SYS_LOG_LEVEL CONFIG_SYS_LOG_NET_LEVEL +#define NET_LOG_ENABLED 1 +#endif + +#include +#include +#include + +#include +#include +#include + +#include + +#include "common.h" + +/* The startup time needs to be longish if DHCP is enabled as setting + * DHCP up takes some time. 
+ */ +#define APP_STARTUP_TIME K_SECONDS(20) + +#define APP_BANNER "Run echo server" + +struct net_pkt *build_reply_pkt(const char *name, + struct net_app_ctx *ctx, + struct net_pkt *pkt) +{ + struct net_pkt *reply_pkt; + struct net_buf *frag, *tmp; + int header_len = 0, recv_len, reply_len; + + NET_INFO("%s received %d bytes", name, net_pkt_appdatalen(pkt)); + + if (net_pkt_appdatalen(pkt) == 0) { + return NULL; + } + + reply_pkt = net_app_get_net_pkt(ctx, net_pkt_family(pkt), K_FOREVER); + + NET_ASSERT(reply_pkt); + NET_ASSERT(net_pkt_family(reply_pkt) == net_pkt_family(pkt)); + + recv_len = net_pkt_get_len(pkt); + + tmp = pkt->frags; + + /* If we have link layer headers, then get rid of them here. */ + if (recv_len != net_pkt_appdatalen(pkt)) { + /* First fragment will contain IP header so move the data + * down in order to get rid of it. + */ + header_len = net_pkt_appdata(pkt) - tmp->data; + + NET_ASSERT(header_len < CONFIG_NET_BUF_DATA_SIZE); + + /* After this pull, the tmp->data points directly to application + * data. + */ + net_buf_pull(tmp, header_len); + } + + net_pkt_set_appdatalen(reply_pkt, net_pkt_appdatalen(pkt)); + + while (tmp) { + frag = net_app_get_net_buf(ctx, reply_pkt, K_FOREVER); + + if (net_buf_headroom(tmp) == 0) { + /* If there is no link layer headers in the + * received fragment, then get rid of that also + * in the sending fragment. We end up here + * if MTU is larger than fragment size, this + * is typical for ethernet. + */ + net_buf_push(frag, net_buf_headroom(frag)); + + frag->len = 0; /* to make fragment empty */ + + /* Make sure to set the reserve so that + * in sending side we add the link layer + * header if needed. 
+ */ + net_pkt_set_ll_reserve(reply_pkt, 0); + } + + NET_ASSERT_INFO(net_buf_tailroom(frag) >= tmp->len, + "tail %zd longer than len %d", + net_buf_tailroom(frag), tmp->len); + + memcpy(net_buf_add(frag, tmp->len), tmp->data, tmp->len); + + tmp = net_pkt_frag_del(pkt, NULL, tmp); + } + + reply_len = net_pkt_get_len(reply_pkt); + + NET_ASSERT_INFO((recv_len - header_len) == reply_len, + "Received %d bytes, sending %d bytes", + recv_len - header_len, reply_len); + + return reply_pkt; +} + +void pkt_sent(struct net_app_ctx *ctx, + int status, + void *user_data_send, + void *user_data) +{ + if (!status) { + NET_INFO("Sent %d bytes", POINTER_TO_UINT(user_data_send)); + } +} + +void setup_echo_server(void) +{ + if (IS_ENABLED(CONFIG_NET_TCP)) { + start_tcp(); + } + + if (IS_ENABLED(CONFIG_NET_UDP)) { + start_udp(); + } +} + +void cleanup_echo_server(void) +{ + if (IS_ENABLED(CONFIG_NET_TCP)) { + stop_tcp(); + } + + if (IS_ENABLED(CONFIG_NET_UDP)) { + stop_udp(); + } +} diff --git a/tests/net/automatic_testing/src/main.c b/tests/net/automatic_testing/src/main.c new file mode 100644 index 0000000000000..f654817638e5b --- /dev/null +++ b/tests/net/automatic_testing/src/main.c @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2018 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_TEST_APP) +#define SYS_LOG_DOMAIN "net-test/main" +#define NET_SYS_LOG_LEVEL CONFIG_SYS_LOG_NET_LEVEL +#define NET_LOG_ENABLED 1 +#endif + +#include +#include + +#include + +#include "common.h" + +static struct k_sem quit_lock; + +static struct interfaces network_interfaces; + +void panic(const char *msg) +{ + if (msg) { + NET_ERR("%s", msg); + } + + for (;;) { + k_sleep(K_FOREVER); + } +} + +void quit(void) +{ + k_sem_give(&quit_lock); +} + +void iface_cb(struct net_if *iface, void *user_data) +{ + struct interfaces *interfaces = user_data; + + if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) { + return; + } + + if (interfaces->non_vlan == iface) { + NET_DBG("1st interface %p", iface); + return; + } + + if (!interfaces->first_vlan) { + NET_DBG("2nd interface %p", iface); + interfaces->first_vlan = iface; + return; + } + + if (!interfaces->second_vlan) { + NET_DBG("3rd interface %p", iface); + interfaces->second_vlan = iface; + return; + } +} + +static inline int init_app(struct interfaces *interfaces) +{ + struct net_if *iface; + + iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET)); + if (!iface) { + NET_ERR("No ethernet interfaces found."); + return -ENOENT; + } + + interfaces->non_vlan = iface; + interfaces->first_vlan = NULL; + interfaces->second_vlan = NULL; + + net_if_foreach(iface_cb, interfaces); + + k_sem_init(&quit_lock, 0, UINT_MAX); + + return 0; +} + +static int setup_vlan_iface(struct net_if *iface, char *ipv6_addr, + char *ipv4_addr, char *ipv4_gw, char *ipv4_netmask) +{ + struct net_if_addr *ifaddr; + struct in_addr addr4; + struct in6_addr addr6; + + if (net_addr_pton(AF_INET6, ipv6_addr, &addr6)) { + NET_ERR("Invalid address: %s", ipv6_addr); + return -EINVAL; + } + + ifaddr = net_if_ipv6_addr_add(iface, &addr6, NET_ADDR_MANUAL, 0); + if (!ifaddr) { + NET_ERR("Cannot add %s to interface %p", ipv6_addr, iface); + return -EINVAL; + } + + if 
(net_addr_pton(AF_INET, ipv4_addr, &addr4)) { + NET_ERR("Invalid address: %s", ipv4_addr); + return -EINVAL; + } + + ifaddr = net_if_ipv4_addr_add(iface, &addr4, NET_ADDR_MANUAL, 0); + if (!ifaddr) { + NET_ERR("Cannot add %s to interface %p", ipv4_addr, iface); + return -EINVAL; + } + + if (ipv4_gw[0]) { + /* If not empty */ + if (net_addr_pton(AF_INET, ipv4_gw, &addr4)) { + NET_ERR("Invalid gateway: %s", ipv4_gw); + } else { + net_if_ipv4_set_gw(iface, &addr4); + } + } + + if (ipv4_netmask[0]) { + if (net_addr_pton(AF_INET, ipv4_netmask, &addr4)) { + NET_ERR("Invalid netmask: %s", ipv4_netmask); + } else { + net_if_ipv4_set_netmask(iface, &addr4); + } + } + + return 0; +} + +static int setup_vlan_iface_1(struct net_if *iface) +{ + return setup_vlan_iface(iface, + CONFIG_SAMPLE_IPV6_ADDR_1, + CONFIG_SAMPLE_IPV4_ADDR_1, + CONFIG_SAMPLE_IPV4_GW_1, + CONFIG_SAMPLE_IPV4_NETMASK_1); +} + +static int setup_vlan_iface_2(struct net_if *iface) +{ + return setup_vlan_iface(iface, + CONFIG_SAMPLE_IPV6_ADDR_2, + CONFIG_SAMPLE_IPV4_ADDR_2, + CONFIG_SAMPLE_IPV4_GW_2, + CONFIG_SAMPLE_IPV4_NETMASK_2); +} + +static int setup_ip_addresses(struct interfaces *interfaces) +{ + if (interfaces->first_vlan) { + setup_vlan_iface_1(interfaces->first_vlan); + } + + if (interfaces->second_vlan) { + setup_vlan_iface_2(interfaces->second_vlan); + } + + return 0; +} + +void main(void) +{ + int ret; + + ret = init_app(&network_interfaces); + if (ret < 0) { + NET_ERR("Cannot initialize application (%d)", ret); + return; + } + +#if defined(CONFIG_NET_VLAN) + setup_vlan(&network_interfaces); +#endif + + ret = setup_ip_addresses(&network_interfaces); + if (ret < 0) { + NET_ERR("Cannot set IP addresses (%d)", ret); + return; + } + + setup_echo_server(); + + k_sem_take(&quit_lock, K_FOREVER); + + NET_INFO("Stopping..."); + + cleanup_echo_server(); +} diff --git a/tests/net/automatic_testing/src/tcp.c b/tests/net/automatic_testing/src/tcp.c new file mode 100644 index 0000000000000..001b61b33049f --- 
/dev/null +++ b/tests/net/automatic_testing/src/tcp.c @@ -0,0 +1,198 @@ +/* tcp.c - TCP specific code for echo server */ + +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_TEST_APP) +#define SYS_LOG_DOMAIN "net-test/echo/tcp" +#define NET_SYS_LOG_LEVEL CONFIG_SYS_LOG_NET_LEVEL +#define NET_LOG_ENABLED 1 +#endif + +#include +#include +#include + +#include +#include +#include + +#include + +#include "common.h" + +static struct net_app_ctx tcp; + +/* Note that both tcp and udp can share the same pool but in this + * example the UDP context and TCP context have separate pools. + */ +#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL) +NET_PKT_TX_SLAB_DEFINE(echo_tx_tcp, 15); +NET_PKT_DATA_POOL_DEFINE(echo_data_tcp, 30); + +static struct k_mem_slab *tx_tcp_slab(void) +{ + return &echo_tx_tcp; +} + +static struct net_buf_pool *data_tcp_pool(void) +{ + return &echo_data_tcp; +} +#else +#define tx_tcp_slab NULL +#define data_tcp_pool NULL +#endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */ + +#if defined(CONFIG_NET_APP_TLS) + +/* The result buf size is set to large enough so that we can receive max size + * buf back. Note that mbedtls needs also be configured to have equal size + * value for its buffer size. See MBEDTLS_SSL_MAX_CONTENT_LEN option in TLS + * config file. + */ +#define RESULT_BUF_SIZE 1500 +static u8_t tls_result[RESULT_BUF_SIZE]; + +#if !defined(CONFIG_NET_APP_TLS_STACK_SIZE) +#define CONFIG_NET_APP_TLS_STACK_SIZE 8192 +#endif /* CONFIG_NET_APP_TLS_STACK_SIZE */ + +#define APP_BANNER "Run TLS echo-server" +#define INSTANCE_INFO "Zephyr TLS echo-server #1" + +/* Note that each net_app context needs its own stack as there will be + * a separate thread needed. 
+ */ +NET_STACK_DEFINE(NET_APP_TLS, net_app_tls_stack, + CONFIG_NET_APP_TLS_STACK_SIZE, CONFIG_NET_APP_TLS_STACK_SIZE); + +#define RX_FIFO_DEPTH 4 +K_MEM_POOL_DEFINE(ssl_pool, 4, 64, RX_FIFO_DEPTH, 4); +#endif /* CONFIG_NET_APP_TLS */ + +#if defined(CONFIG_NET_APP_TLS) +/* Load the certificates and private RSA key. */ + +#include "test_certs.h" + +static int setup_cert(struct net_app_ctx *ctx, + mbedtls_x509_crt *cert, + mbedtls_pk_context *pkey) +{ + int ret; + + ret = mbedtls_x509_crt_parse(cert, rsa_example_cert_der, + rsa_example_cert_der_len); + if (ret != 0) { + NET_ERR("mbedtls_x509_crt_parse returned %d", ret); + return ret; + } + + ret = mbedtls_pk_parse_key(pkey, rsa_example_keypair_der, + rsa_example_keypair_der_len, NULL, 0); + if (ret != 0) { + NET_ERR("mbedtls_pk_parse_key returned %d", ret); + return ret; + } + + return 0; +} +#endif /* CONFIG_NET_APP_TLS */ + +static void tcp_received(struct net_app_ctx *ctx, + struct net_pkt *pkt, + int status, + void *user_data) +{ + static char dbg[MAX_DBG_PRINT + 1]; + struct net_pkt *reply_pkt; + sa_family_t family; + int ret; + + if (!pkt) { + /* EOF condition */ + return; + } + + family = net_pkt_family(pkt); + + snprintk(dbg, MAX_DBG_PRINT, "TCP IPv%c", + family == AF_INET6 ? 
'6' : '4'); + + reply_pkt = build_reply_pkt(dbg, ctx, pkt); + + net_pkt_unref(pkt); + + if (!reply_pkt) { + return; + } + + ret = net_app_send_pkt(ctx, reply_pkt, NULL, 0, K_NO_WAIT, + UINT_TO_POINTER(net_pkt_get_len(reply_pkt))); + if (ret < 0) { + NET_ERR("Cannot send data to peer (%d)", ret); + net_pkt_unref(reply_pkt); + + quit(); + } +} + +void start_tcp(void) +{ + int ret; + + ret = net_app_init_tcp_server(&tcp, NULL, MY_PORT, NULL); + if (ret < 0) { + NET_ERR("Cannot init TCP service at port %d", MY_PORT); + return; + } + +#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL) + net_app_set_net_pkt_pool(&tcp, tx_tcp_slab, data_tcp_pool); +#endif + + ret = net_app_set_cb(&tcp, NULL, tcp_received, NULL, NULL); + if (ret < 0) { + NET_ERR("Cannot set callbacks (%d)", ret); + net_app_release(&tcp); + return; + } + +#if defined(CONFIG_NET_APP_TLS) + ret = net_app_server_tls(&tcp, + tls_result, + sizeof(tls_result), + APP_BANNER, + INSTANCE_INFO, + strlen(INSTANCE_INFO), + setup_cert, + NULL, + &ssl_pool, + net_app_tls_stack, + K_THREAD_STACK_SIZEOF(net_app_tls_stack)); + if (ret < 0) { + NET_ERR("Cannot init TLS"); + } +#endif + + net_app_server_enable(&tcp); + + ret = net_app_listen(&tcp); + if (ret < 0) { + NET_ERR("Cannot wait connection (%d)", ret); + net_app_release(&tcp); + return; + } +} + +void stop_tcp(void) +{ + net_app_server_disable(&tcp); + + net_app_close(&tcp); + net_app_release(&tcp); +} diff --git a/tests/net/automatic_testing/src/test_certs.h b/tests/net/automatic_testing/src/test_certs.h new file mode 100644 index 0000000000000..bf9c2bdff8b1a --- /dev/null +++ b/tests/net/automatic_testing/src/test_certs.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2017 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef __TEST_CERTS_H__ +#define __TEST_CERTS_H__ + +/* Read the README file in net-tools repo how to generate + * the key and the certificate. + */ + +/* This is the private key in pkcs#8 format. 
*/ +static const unsigned char rsa_example_keypair_der[] = { + 0x30, 0x82, 0x04, 0xbe, 0x02, 0x01, 0x00, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x04, 0x82, + 0x04, 0xa8, 0x30, 0x82, 0x04, 0xa4, 0x02, 0x01, + 0x00, 0x02, 0x82, 0x01, 0x01, 0x00, 0xc6, 0x00, + 0x7b, 0x0d, 0xd1, 0x17, 0x43, 0x6b, 0xa9, 0xa7, + 0x79, 0x9c, 0x0f, 0x8d, 0x77, 0x91, 0xd2, 0xf7, + 0x47, 0x35, 0xb3, 0x17, 0xe2, 0xdd, 0xed, 0x6d, + 0x01, 0xf9, 0xb1, 0x92, 0xc9, 0x48, 0x80, 0xe0, + 0x1f, 0xcf, 0xb7, 0xa4, 0x5f, 0xf0, 0x36, 0xea, + 0xbf, 0xe1, 0x33, 0xf8, 0xa9, 0xc5, 0xe6, 0xd4, + 0x19, 0x8b, 0x82, 0x25, 0xd9, 0x19, 0x74, 0x70, + 0x79, 0xec, 0xc6, 0x68, 0xc9, 0xef, 0xce, 0x1a, + 0xa9, 0xf0, 0xb7, 0x01, 0x35, 0x91, 0xff, 0xd3, + 0x75, 0x6e, 0x02, 0xba, 0x06, 0x9a, 0x2a, 0xac, + 0xcf, 0x22, 0xbf, 0x2b, 0x1f, 0xc1, 0x72, 0x38, + 0x22, 0x35, 0xea, 0xda, 0x6f, 0xdd, 0x67, 0xa2, + 0x2b, 0x19, 0x38, 0x19, 0x0e, 0x44, 0xd1, 0x71, + 0x38, 0xb4, 0x6d, 0x26, 0x85, 0xd6, 0xc6, 0xbe, + 0xc1, 0x6f, 0x3c, 0xee, 0xaf, 0x94, 0x3c, 0x05, + 0x56, 0x4e, 0xad, 0x53, 0x81, 0x8b, 0xd4, 0x23, + 0x31, 0x69, 0x72, 0x27, 0x93, 0xb4, 0x3a, 0xac, + 0x23, 0xe8, 0x10, 0xae, 0xf5, 0x9f, 0x0b, 0xa6, + 0x6e, 0xd3, 0x73, 0xca, 0x18, 0x11, 0xca, 0xbe, + 0x71, 0x00, 0x56, 0x29, 0x34, 0x54, 0xcc, 0xda, + 0x29, 0x5b, 0x26, 0x29, 0x99, 0x4d, 0x5f, 0xa1, + 0xa6, 0xb9, 0xcb, 0x2b, 0xb2, 0x0f, 0x10, 0x00, + 0x04, 0xa9, 0x11, 0x2c, 0x48, 0xb1, 0x99, 0xa5, + 0xca, 0x7c, 0x67, 0xa5, 0xbe, 0x14, 0x20, 0x12, + 0xb7, 0x3b, 0x7a, 0x4f, 0xdc, 0xc7, 0xd5, 0x2d, + 0x04, 0x66, 0xbb, 0xf5, 0x0c, 0xcd, 0xf1, 0x32, + 0x39, 0xd7, 0x51, 0x9b, 0xba, 0xdb, 0xf1, 0xa7, + 0xfe, 0x2d, 0x9a, 0xe6, 0x9c, 0x6b, 0x54, 0xda, + 0xf1, 0xdd, 0x48, 0xf9, 0xd7, 0xf0, 0x35, 0x7c, + 0x8e, 0x24, 0x7e, 0x44, 0x2f, 0xf3, 0xbf, 0x39, + 0x0e, 0x96, 0xab, 0xe1, 0x45, 0x03, 0x8b, 0x54, + 0xdc, 0xe1, 0xb6, 0x11, 0x81, 0x21, 0x02, 0x03, + 0x01, 0x00, 0x01, 0x02, 0x82, 0x01, 0x01, 0x00, + 0xaf, 0x01, 0x1b, 0xc2, 0x41, 0x8d, 
0xa3, 0x3c, + 0x94, 0x8d, 0xfe, 0xf1, 0x8d, 0xd9, 0x86, 0xd6, + 0x6f, 0xb5, 0xb9, 0xcd, 0xdf, 0xc6, 0x7b, 0xf1, + 0x95, 0x79, 0xa4, 0xa6, 0x52, 0x6a, 0x1f, 0xd8, + 0x22, 0x78, 0x76, 0x05, 0x7d, 0x16, 0x1e, 0xe9, + 0xcd, 0x50, 0x3d, 0x5a, 0x4a, 0x3a, 0x3d, 0xac, + 0x3a, 0x72, 0x3b, 0x45, 0xd0, 0x39, 0xad, 0xa2, + 0x4f, 0x05, 0x5e, 0xee, 0x85, 0x0b, 0x77, 0x78, + 0xaa, 0x1d, 0x6d, 0x18, 0xe2, 0x57, 0x82, 0x46, + 0xd1, 0xeb, 0xcf, 0xbd, 0xf5, 0x51, 0x00, 0xa6, + 0xe0, 0x93, 0x2e, 0xdd, 0x52, 0x44, 0x46, 0x64, + 0x0f, 0x95, 0x47, 0xbf, 0x8a, 0x1d, 0x99, 0x84, + 0x02, 0xa6, 0x16, 0x1b, 0xcb, 0x36, 0x72, 0x23, + 0x27, 0x4f, 0x40, 0x44, 0x50, 0xb9, 0x1c, 0xd0, + 0x91, 0x87, 0x66, 0xa6, 0xf0, 0x1f, 0x33, 0x36, + 0xfc, 0xb9, 0xe6, 0xe7, 0xd1, 0x50, 0x9e, 0x8d, + 0x2f, 0x66, 0xbd, 0x7b, 0x52, 0x20, 0xf8, 0x7f, + 0x51, 0x3a, 0x48, 0xac, 0x3a, 0x32, 0xde, 0x65, + 0x1a, 0xac, 0x9c, 0xa6, 0x6a, 0xec, 0x44, 0xee, + 0x06, 0x95, 0x05, 0xf2, 0x4a, 0xc5, 0x0c, 0x7d, + 0x51, 0x1f, 0xaa, 0xb4, 0xc0, 0xb9, 0x07, 0x87, + 0xff, 0x2b, 0x28, 0x12, 0x80, 0xea, 0x3c, 0x9e, + 0xb1, 0x81, 0xef, 0xfd, 0xd4, 0xe3, 0x09, 0x1f, + 0xc9, 0x9b, 0xf3, 0xea, 0xa1, 0x7d, 0x0d, 0x2a, + 0x4d, 0x6d, 0x9c, 0x06, 0xdf, 0x87, 0x51, 0xf4, + 0x7b, 0xc0, 0x7d, 0xc3, 0x6c, 0x78, 0x59, 0xe4, + 0xb7, 0x27, 0xd7, 0xf7, 0xf5, 0x6f, 0x63, 0xff, + 0x27, 0xe1, 0x57, 0x86, 0xf3, 0x08, 0xa3, 0xf9, + 0x6b, 0xef, 0xbe, 0xd0, 0x72, 0x29, 0x44, 0x3b, + 0xa9, 0x06, 0x4c, 0xde, 0xf7, 0x0e, 0x63, 0x22, + 0x90, 0x33, 0x2e, 0x14, 0x0c, 0xff, 0x83, 0x42, + 0xf0, 0x96, 0xa3, 0x4a, 0xf5, 0x5b, 0x01, 0xf1, + 0x02, 0x81, 0x81, 0x00, 0xe3, 0xa4, 0x26, 0x57, + 0x02, 0x11, 0x99, 0x42, 0xbb, 0x40, 0x10, 0x68, + 0xf4, 0xcc, 0xa1, 0x2f, 0x3c, 0x84, 0x0e, 0x7c, + 0xa5, 0xfc, 0x3c, 0x68, 0x18, 0x8e, 0xe9, 0x9c, + 0x2f, 0x1c, 0x49, 0x57, 0xe0, 0xdd, 0x89, 0xca, + 0xf9, 0x4f, 0x99, 0x22, 0xe5, 0xd4, 0x30, 0x0a, + 0x60, 0xa7, 0x02, 0x1c, 0x04, 0x2e, 0x07, 0x1c, + 0x0f, 0xb6, 0x60, 0x59, 0xeb, 0x8f, 0x38, 0x91, + 0x36, 0x0b, 0xb3, 0x6c, 0x13, 0xd8, 
0xaf, 0x3a, + 0xe5, 0x70, 0x47, 0x4c, 0xeb, 0x6d, 0x29, 0xd1, + 0x66, 0xa7, 0xe7, 0x97, 0x51, 0x5c, 0x6d, 0xda, + 0xf7, 0x6c, 0xb3, 0x31, 0xd2, 0xd9, 0x5c, 0x99, + 0xaa, 0x84, 0x32, 0x2d, 0x40, 0xfb, 0xf0, 0xa8, + 0x85, 0x3b, 0x2c, 0x86, 0x87, 0x9b, 0xe7, 0xb4, + 0x9f, 0x21, 0x13, 0x50, 0xd4, 0x56, 0x6b, 0x65, + 0xda, 0x3c, 0xf6, 0x9e, 0xc1, 0xe4, 0xee, 0xde, + 0x64, 0x10, 0x29, 0x5d, 0x02, 0x81, 0x81, 0x00, + 0xde, 0xab, 0x15, 0x8b, 0xc5, 0xd1, 0x8f, 0xc1, + 0x51, 0xd7, 0x54, 0x47, 0xa2, 0xc5, 0x7f, 0xae, + 0x87, 0xcc, 0xdd, 0xc2, 0x9a, 0x1e, 0x8f, 0x15, + 0xb2, 0x9d, 0x37, 0xfe, 0x4d, 0x43, 0xb2, 0x52, + 0xe7, 0xb2, 0x0e, 0xe0, 0x1f, 0x48, 0xde, 0x07, + 0x8e, 0xff, 0x74, 0xbd, 0xfc, 0xbd, 0x27, 0x2a, + 0x7a, 0xc0, 0x66, 0x09, 0xaf, 0x5b, 0x04, 0xed, + 0xbc, 0x9c, 0x9f, 0xe6, 0x43, 0xa3, 0x9e, 0xc7, + 0x73, 0x3d, 0xcb, 0x6f, 0x69, 0x58, 0x43, 0xe8, + 0xfe, 0xd8, 0x06, 0xb9, 0xcb, 0x09, 0xbc, 0xc7, + 0xb5, 0x2e, 0xed, 0xcc, 0xdf, 0x29, 0x26, 0x6c, + 0x01, 0x33, 0xfd, 0xa3, 0xad, 0xc6, 0x82, 0x0b, + 0xb2, 0x14, 0x75, 0xb2, 0xf6, 0xba, 0xc1, 0xae, + 0xdb, 0x4c, 0x08, 0x09, 0x47, 0x09, 0xc9, 0x88, + 0x3d, 0x69, 0x72, 0xeb, 0x78, 0xd9, 0xf6, 0x5a, + 0xe7, 0xcf, 0x58, 0xf1, 0xdb, 0x22, 0x46, 0x95, + 0x02, 0x81, 0x80, 0x47, 0x0b, 0xe6, 0x43, 0x65, + 0x8b, 0xe4, 0x8f, 0x65, 0x2d, 0x9b, 0x71, 0x86, + 0x65, 0x8f, 0x30, 0xd4, 0xec, 0x4a, 0x31, 0x31, + 0x04, 0xc6, 0x3d, 0x9e, 0xdc, 0x8b, 0xa0, 0x89, + 0x3b, 0x84, 0xd0, 0x7e, 0x87, 0xe6, 0x07, 0x89, + 0x44, 0x04, 0x81, 0x55, 0xc5, 0xa1, 0xb0, 0xbb, + 0x75, 0xa4, 0x3c, 0xe6, 0x39, 0x0f, 0x52, 0xfc, + 0xbc, 0x68, 0xd9, 0xe7, 0xa0, 0xed, 0x80, 0xb4, + 0xa3, 0x76, 0x3e, 0x16, 0xfb, 0x9b, 0xc4, 0xff, + 0xf2, 0xc8, 0xee, 0x02, 0xd4, 0x31, 0x26, 0xff, + 0xa9, 0x0b, 0xa8, 0xb8, 0x65, 0xe6, 0x8b, 0x5e, + 0x89, 0x78, 0x6b, 0xe6, 0x68, 0x9c, 0x74, 0x01, + 0x39, 0xfd, 0x07, 0x0e, 0xaf, 0x12, 0xef, 0x25, + 0x95, 0xd0, 0x51, 0xb4, 0x65, 0x3e, 0x2d, 0xab, + 0x34, 0x81, 0x78, 0xca, 0xac, 0xa6, 0x1c, 0x45, + 0xbe, 0x9d, 0x21, 0xf3, 0x55, 0xe6, 
0x0f, 0x56, + 0x9f, 0x8c, 0xa9, 0x02, 0x81, 0x81, 0x00, 0xd6, + 0xcf, 0x45, 0xa1, 0xe9, 0xdd, 0x19, 0x49, 0xee, + 0xa2, 0x44, 0xf4, 0x36, 0xdb, 0x41, 0x2a, 0x26, + 0x68, 0x45, 0x27, 0x8f, 0x77, 0x1b, 0xa2, 0x5b, + 0x05, 0xc1, 0x16, 0xdb, 0x83, 0x0b, 0x6d, 0x5f, + 0x88, 0x2f, 0x41, 0x97, 0xec, 0xbc, 0x05, 0x9e, + 0xb6, 0x62, 0xf1, 0x45, 0xf9, 0x54, 0xe7, 0x6e, + 0x34, 0x5e, 0x3b, 0xc9, 0xa7, 0x8d, 0x41, 0xac, + 0x4b, 0x8d, 0x16, 0x9e, 0xfa, 0x22, 0x1b, 0x57, + 0xd9, 0xad, 0x80, 0x75, 0xa8, 0x69, 0x1b, 0x45, + 0x7f, 0x32, 0x3d, 0xda, 0xda, 0x31, 0xfc, 0x97, + 0x96, 0x06, 0x63, 0x36, 0x86, 0x84, 0x42, 0x7d, + 0x86, 0xa2, 0x92, 0x5c, 0xfa, 0xbc, 0x4a, 0x7c, + 0x29, 0xb0, 0xc2, 0x4d, 0xb4, 0x2d, 0x55, 0x7a, + 0xca, 0x06, 0x96, 0xac, 0xa9, 0x8c, 0x7a, 0x9a, + 0x84, 0x38, 0x11, 0x62, 0x2d, 0x23, 0xb4, 0xdf, + 0x1c, 0xf1, 0x39, 0xa1, 0xa5, 0x97, 0x61, 0x02, + 0x81, 0x80, 0x6c, 0xfb, 0x23, 0x03, 0xa6, 0xa4, + 0x29, 0x03, 0x36, 0x84, 0x86, 0x95, 0x6d, 0x86, + 0x32, 0x98, 0xbe, 0x0a, 0xa2, 0xba, 0x47, 0xd0, + 0x29, 0x3f, 0x8c, 0xa6, 0x28, 0x93, 0x6f, 0x25, + 0x69, 0x38, 0x38, 0x03, 0xe5, 0x9b, 0xc8, 0x48, + 0x35, 0xc7, 0x26, 0xc0, 0x75, 0xd2, 0x95, 0x5b, + 0x51, 0x6d, 0x28, 0x88, 0xb1, 0x4b, 0xf0, 0x59, + 0x18, 0x15, 0x9d, 0x9e, 0x48, 0xb5, 0x67, 0x0c, + 0xb0, 0x83, 0xfa, 0x45, 0x48, 0x4d, 0xaa, 0xba, + 0xe3, 0xdf, 0x76, 0x60, 0x06, 0x09, 0x0d, 0x72, + 0x8c, 0x0c, 0xd2, 0x70, 0x39, 0xed, 0x19, 0xd8, + 0x1b, 0x64, 0x33, 0xce, 0x12, 0xa8, 0xb3, 0xa0, + 0xf5, 0x5f, 0xcc, 0xf7, 0x1a, 0x63, 0x5d, 0x44, + 0x61, 0xc6, 0xa0, 0x54, 0xe9, 0x01, 0xa6, 0x54, + 0x16, 0xa7, 0xa0, 0x9e, 0xc9, 0xd8, 0x9c, 0x19, + 0x04, 0xe1, 0x5e, 0xd5, 0x37, 0xbb, 0xfd, 0x76, + 0x77, 0x75, +}; + +static const unsigned int rsa_example_keypair_der_len = + sizeof(rsa_example_keypair_der); + +static const unsigned char rsa_example_cert_der[] = { + 0x30, 0x82, 0x02, 0xfb, 0x30, 0x82, 0x01, 0xe3, + 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, + 0xee, 0x10, 0x1f, 0xc1, 0xf2, 0x30, 0xe9, 0x11, + 0x30, 0x0d, 
0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, + 0x14, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, + 0x04, 0x03, 0x0c, 0x09, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x30, 0x1e, 0x17, + 0x0d, 0x31, 0x37, 0x30, 0x36, 0x32, 0x36, 0x31, + 0x30, 0x35, 0x36, 0x31, 0x30, 0x5a, 0x17, 0x0d, + 0x34, 0x34, 0x31, 0x31, 0x31, 0x31, 0x31, 0x30, + 0x35, 0x36, 0x31, 0x30, 0x5a, 0x30, 0x14, 0x31, + 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x03, + 0x0c, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, + 0x6f, 0x73, 0x74, 0x30, 0x82, 0x01, 0x22, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, + 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, + 0x82, 0x01, 0x01, 0x00, 0xc6, 0x00, 0x7b, 0x0d, + 0xd1, 0x17, 0x43, 0x6b, 0xa9, 0xa7, 0x79, 0x9c, + 0x0f, 0x8d, 0x77, 0x91, 0xd2, 0xf7, 0x47, 0x35, + 0xb3, 0x17, 0xe2, 0xdd, 0xed, 0x6d, 0x01, 0xf9, + 0xb1, 0x92, 0xc9, 0x48, 0x80, 0xe0, 0x1f, 0xcf, + 0xb7, 0xa4, 0x5f, 0xf0, 0x36, 0xea, 0xbf, 0xe1, + 0x33, 0xf8, 0xa9, 0xc5, 0xe6, 0xd4, 0x19, 0x8b, + 0x82, 0x25, 0xd9, 0x19, 0x74, 0x70, 0x79, 0xec, + 0xc6, 0x68, 0xc9, 0xef, 0xce, 0x1a, 0xa9, 0xf0, + 0xb7, 0x01, 0x35, 0x91, 0xff, 0xd3, 0x75, 0x6e, + 0x02, 0xba, 0x06, 0x9a, 0x2a, 0xac, 0xcf, 0x22, + 0xbf, 0x2b, 0x1f, 0xc1, 0x72, 0x38, 0x22, 0x35, + 0xea, 0xda, 0x6f, 0xdd, 0x67, 0xa2, 0x2b, 0x19, + 0x38, 0x19, 0x0e, 0x44, 0xd1, 0x71, 0x38, 0xb4, + 0x6d, 0x26, 0x85, 0xd6, 0xc6, 0xbe, 0xc1, 0x6f, + 0x3c, 0xee, 0xaf, 0x94, 0x3c, 0x05, 0x56, 0x4e, + 0xad, 0x53, 0x81, 0x8b, 0xd4, 0x23, 0x31, 0x69, + 0x72, 0x27, 0x93, 0xb4, 0x3a, 0xac, 0x23, 0xe8, + 0x10, 0xae, 0xf5, 0x9f, 0x0b, 0xa6, 0x6e, 0xd3, + 0x73, 0xca, 0x18, 0x11, 0xca, 0xbe, 0x71, 0x00, + 0x56, 0x29, 0x34, 0x54, 0xcc, 0xda, 0x29, 0x5b, + 0x26, 0x29, 0x99, 0x4d, 0x5f, 0xa1, 0xa6, 0xb9, + 0xcb, 0x2b, 0xb2, 0x0f, 0x10, 0x00, 0x04, 0xa9, + 0x11, 0x2c, 0x48, 0xb1, 0x99, 0xa5, 0xca, 0x7c, + 0x67, 0xa5, 0xbe, 0x14, 0x20, 0x12, 0xb7, 0x3b, + 0x7a, 0x4f, 
0xdc, 0xc7, 0xd5, 0x2d, 0x04, 0x66, + 0xbb, 0xf5, 0x0c, 0xcd, 0xf1, 0x32, 0x39, 0xd7, + 0x51, 0x9b, 0xba, 0xdb, 0xf1, 0xa7, 0xfe, 0x2d, + 0x9a, 0xe6, 0x9c, 0x6b, 0x54, 0xda, 0xf1, 0xdd, + 0x48, 0xf9, 0xd7, 0xf0, 0x35, 0x7c, 0x8e, 0x24, + 0x7e, 0x44, 0x2f, 0xf3, 0xbf, 0x39, 0x0e, 0x96, + 0xab, 0xe1, 0x45, 0x03, 0x8b, 0x54, 0xdc, 0xe1, + 0xb6, 0x11, 0x81, 0x21, 0x02, 0x03, 0x01, 0x00, + 0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, 0x06, + 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, + 0xa4, 0xef, 0x6d, 0xdc, 0x9b, 0x23, 0xc5, 0x3a, + 0xdd, 0x34, 0xd9, 0x01, 0x1c, 0x68, 0x03, 0x53, + 0xae, 0x92, 0xc2, 0xc9, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, + 0x14, 0xa4, 0xef, 0x6d, 0xdc, 0x9b, 0x23, 0xc5, + 0x3a, 0xdd, 0x34, 0xd9, 0x01, 0x1c, 0x68, 0x03, + 0x53, 0xae, 0x92, 0xc2, 0xc9, 0x30, 0x0c, 0x06, + 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03, + 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, + 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x67, + 0x65, 0xbf, 0x93, 0x89, 0xde, 0x4f, 0x71, 0xff, + 0x1c, 0x93, 0x68, 0xa0, 0x64, 0x09, 0x5e, 0x95, + 0x94, 0xf5, 0xd5, 0xf4, 0x6b, 0x20, 0x32, 0xd8, + 0x04, 0x80, 0xac, 0xf8, 0x52, 0x36, 0x7a, 0x38, + 0x83, 0xae, 0xab, 0x29, 0x22, 0x42, 0x71, 0x7e, + 0xea, 0xe5, 0x4f, 0x71, 0xac, 0x44, 0x3f, 0x9e, + 0x5e, 0x49, 0x22, 0x05, 0xee, 0xa6, 0x7b, 0xab, + 0x56, 0x2e, 0xb3, 0x9a, 0x35, 0x1a, 0x88, 0xc3, + 0x54, 0x9b, 0xfd, 0xac, 0x65, 0x54, 0xaf, 0x21, + 0xa7, 0xe0, 0xdd, 0x62, 0x29, 0x8c, 0xae, 0x26, + 0x0b, 0x84, 0x1f, 0x69, 0x78, 0x84, 0xc6, 0x7e, + 0xcf, 0xc8, 0xf5, 0x92, 0x8c, 0x05, 0xa8, 0x13, + 0x38, 0xcd, 0x0b, 0x98, 0x53, 0xfb, 0xdd, 0x8d, + 0x51, 0x90, 0xa8, 0x51, 0xfa, 0x52, 0xbe, 0x28, + 0xd4, 0x71, 0x50, 0x73, 0x1f, 0xb0, 0xb6, 0x0e, + 0x45, 0xb1, 0x47, 0x41, 0x06, 0xd9, 0x1d, 0x7a, + 0x34, 0xe7, 0x80, 0x2e, 0x0c, 0x02, 0x50, 0x97, + 0xde, 0xa8, 0x7a, 0x84, 0x2c, 0x1d, 0xf4, 0x51, + 0x56, 0xa5, 0x52, 0xb5, 0x04, 0x2e, 0xcb, 0xdd, + 0x8b, 0x2e, 
0x16, 0xc6, 0xde, 0xc8, 0xe9, 0x8d, + 0xee, 0x5e, 0xb6, 0xa0, 0xe0, 0x2b, 0x85, 0x2a, + 0x89, 0x7b, 0xba, 0x68, 0x80, 0x2b, 0xfb, 0x6e, + 0x2e, 0x80, 0xe7, 0x7a, 0x97, 0x09, 0xb5, 0x2f, + 0x20, 0x8e, 0xed, 0xbc, 0x98, 0x6f, 0x95, 0xd5, + 0x5b, 0x3d, 0x26, 0x19, 0x26, 0x14, 0x39, 0x82, + 0xa8, 0xa8, 0x42, 0x46, 0xab, 0x59, 0x93, 0x47, + 0x83, 0xf7, 0x79, 0xbf, 0x73, 0xb5, 0x5d, 0x5d, + 0x78, 0xfe, 0x62, 0xac, 0xed, 0xb7, 0x1e, 0x4a, + 0xad, 0xc3, 0x99, 0x39, 0x7d, 0x3e, 0x30, 0x21, + 0x26, 0x1d, 0x66, 0xdb, 0x0d, 0xf3, 0xba, 0x87, + 0x46, 0xf0, 0x04, 0xfc, 0xc3, 0xbe, 0x84, 0x85, + 0x3c, 0x01, 0xef, 0xe0, 0x68, 0x65, 0xee, +}; + +static const unsigned int rsa_example_cert_der_len = + sizeof(rsa_example_cert_der); + +#endif diff --git a/tests/net/automatic_testing/src/udp.c b/tests/net/automatic_testing/src/udp.c new file mode 100644 index 0000000000000..1644db609abb4 --- /dev/null +++ b/tests/net/automatic_testing/src/udp.c @@ -0,0 +1,231 @@ +/* udp.c - UDP specific code for echo server */ + +/* + * Copyright (c) 2017 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_TEST_APP) +#define SYS_LOG_DOMAIN "net-test/echo/udp" +#define NET_SYS_LOG_LEVEL CONFIG_SYS_LOG_NET_LEVEL +#define NET_LOG_ENABLED 1 +#endif + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "common.h" + +static struct net_app_ctx udp; + +/* Note that both tcp and udp can share the same pool but in this + * example the UDP context and TCP context have separate pools. 
+ */ +#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL) +NET_PKT_TX_SLAB_DEFINE(echo_tx_udp, 5); +NET_PKT_DATA_POOL_DEFINE(echo_data_udp, 20); + +static struct k_mem_slab *tx_udp_slab(void) +{ + return &echo_tx_udp; +} + +static struct net_buf_pool *data_udp_pool(void) +{ + return &echo_data_udp; +} +#else +#define tx_udp_slab NULL +#define data_udp_pool NULL +#endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */ + +#if defined(CONFIG_NET_APP_DTLS) + +/* The result buf size is set to large enough so that we can receive max size + * buf back. Note that mbedtls needs also be configured to have equal size + * value for its buffer size. See MBEDTLS_SSL_MAX_CONTENT_LEN option in TLS + * config file. + */ +#define RESULT_BUF_SIZE 1500 +static u8_t dtls_result[RESULT_BUF_SIZE]; + +#define APP_BANNER "Run DTLS echo-server" +#define INSTANCE_INFO "Zephyr DTLS echo-server #1" + +/* Note that each net_app context needs its own stack as there will be + * a separate thread needed. + */ +NET_STACK_DEFINE(NET_APP_DTLS, net_app_dtls_stack, + CONFIG_NET_APP_TLS_STACK_SIZE, CONFIG_NET_APP_TLS_STACK_SIZE); + +#define RX_FIFO_DEPTH 4 +K_MEM_POOL_DEFINE(dtls_pool, 4, 64, RX_FIFO_DEPTH, 4); +#endif /* CONFIG_NET_APP_TLS */ + +#if defined(CONFIG_NET_APP_DTLS) +/* Load the certificates and private RSA key. 
*/ + +#include "test_certs.h" + +static int setup_cert(struct net_app_ctx *ctx, + mbedtls_x509_crt *cert, + mbedtls_pk_context *pkey) +{ + int ret; + + ret = mbedtls_x509_crt_parse(cert, rsa_example_cert_der, + rsa_example_cert_der_len); + if (ret != 0) { + NET_ERR("mbedtls_x509_crt_parse returned %d", ret); + return ret; + } + + ret = mbedtls_pk_parse_key(pkey, rsa_example_keypair_der, + rsa_example_keypair_der_len, NULL, 0); + if (ret != 0) { + NET_ERR("mbedtls_pk_parse_key returned %d", ret); + return ret; + } + + return 0; +} +#endif /* CONFIG_NET_APP_DTLS */ + +static inline void set_dst_addr(sa_family_t family, + struct net_pkt *pkt, + struct sockaddr *dst_addr) +{ + struct net_udp_hdr hdr, *udp_hdr; + + udp_hdr = net_udp_get_hdr(pkt, &hdr); + if (!udp_hdr) { + return; + } + +#if defined(CONFIG_NET_IPV6) + if (family == AF_INET6) { + net_ipaddr_copy(&net_sin6(dst_addr)->sin6_addr, + &NET_IPV6_HDR(pkt)->src); + net_sin6(dst_addr)->sin6_family = AF_INET6; + net_sin6(dst_addr)->sin6_port = udp_hdr->src_port; + } +#endif /* CONFIG_NET_IPV6) */ + +#if defined(CONFIG_NET_IPV4) + if (family == AF_INET) { + net_ipaddr_copy(&net_sin(dst_addr)->sin_addr, + &NET_IPV4_HDR(pkt)->src); + net_sin(dst_addr)->sin_family = AF_INET; + net_sin(dst_addr)->sin_port = udp_hdr->src_port; + } +#endif /* CONFIG_NET_IPV6) */ +} + +static void udp_received(struct net_app_ctx *ctx, + struct net_pkt *pkt, + int status, + void *user_data) +{ + static char dbg[MAX_DBG_PRINT + 1]; + struct net_pkt *reply_pkt; + struct sockaddr dst_addr; + sa_family_t family = net_pkt_family(pkt); + socklen_t dst_len; + u32_t pkt_len; + int ret; + + snprintk(dbg, MAX_DBG_PRINT, "UDP IPv%c", + family == AF_INET6 ? 
'6' : '4'); + + if (family == AF_INET6) { + dst_len = sizeof(struct sockaddr_in6); + } else { + dst_len = sizeof(struct sockaddr_in); + } + + /* Note that for DTLS swapping the source/destination address has no + * effect as the user data is sent in a DTLS tunnel where tunnel end + * points are already set. + */ + set_dst_addr(family, pkt, &dst_addr); + + reply_pkt = build_reply_pkt(dbg, ctx, pkt); + + net_pkt_unref(pkt); + + if (!reply_pkt) { + return; + } + + pkt_len = net_pkt_appdatalen(reply_pkt); + + ret = net_app_send_pkt(ctx, reply_pkt, &dst_addr, dst_len, K_NO_WAIT, + UINT_TO_POINTER(pkt_len)); + if (ret < 0) { + NET_ERR("Cannot send data to peer (%d)", ret); + net_pkt_unref(reply_pkt); + } +} + +void start_udp(void) +{ + int ret; + + ret = net_app_init_udp_server(&udp, NULL, MY_PORT, NULL); + if (ret < 0) { + NET_ERR("Cannot init UDP service at port %d", MY_PORT); + return; + } + +#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL) + net_app_set_net_pkt_pool(&udp, tx_udp_slab, data_udp_pool); +#endif + + ret = net_app_set_cb(&udp, NULL, udp_received, pkt_sent, NULL); + if (ret < 0) { + NET_ERR("Cannot set callbacks (%d)", ret); + net_app_release(&udp); + return; + } + +#if defined(CONFIG_NET_APP_DTLS) + ret = net_app_server_tls(&udp, + dtls_result, + sizeof(dtls_result), + APP_BANNER, + INSTANCE_INFO, + strlen(INSTANCE_INFO), + setup_cert, + NULL, + &dtls_pool, + net_app_dtls_stack, + K_THREAD_STACK_SIZEOF(net_app_dtls_stack)); + if (ret < 0) { + NET_ERR("Cannot init DTLS"); + } +#endif + + net_app_server_enable(&udp); + + ret = net_app_listen(&udp); + if (ret < 0) { + NET_ERR("Cannot wait connection (%d)", ret); + net_app_release(&udp); + return; + } +} + +void stop_udp(void) +{ + net_app_close(&udp); + net_app_release(&udp); +} diff --git a/tests/net/automatic_testing/src/vlan.c b/tests/net/automatic_testing/src/vlan.c new file mode 100644 index 0000000000000..4787c3aa8d66b --- /dev/null +++ b/tests/net/automatic_testing/src/vlan.c @@ -0,0 +1,62 @@ +/* + * 
Copyright (c) 2018 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(CONFIG_NET_DEBUG_TEST_APP) +#define SYS_LOG_DOMAIN "net-test/vlan" +#define NET_SYS_LOG_LEVEL CONFIG_SYS_LOG_NET_LEVEL +#define NET_LOG_ENABLED 1 +#endif + +#include +#include + +#include +#include +#include +#include + +#include "common.h" + +#if CONFIG_NET_VLAN_COUNT > 1 +#define CREATE_MULTIPLE_TAGS +#endif + +int setup_vlan(struct interfaces *interfaces) +{ + int ret; + + /* For SLIP technology, we create one VLAN interface */ +#if !defined(CREATE_MULTIPLE_TAGS) + ret = net_eth_vlan_enable(interfaces->non_vlan, + CONFIG_SAMPLE_VLAN_TAG_1); + if (ret < 0) { + NET_ERR("Cannot enable VLAN for tag %d (%d)", + CONFIG_SAMPLE_VLAN_TAG_1, ret); + } +#endif + +#if defined(CREATE_MULTIPLE_TAGS) + /* This sample has two VLANs. First the VLAN needs to be + * added to the interface so that IPv6 DAD can work properly. + */ + ret = net_eth_vlan_enable(interfaces->first_vlan, + CONFIG_SAMPLE_VLAN_TAG_1); + if (ret < 0) { + NET_ERR("Cannot enable VLAN for tag %d (%d)", + CONFIG_SAMPLE_VLAN_TAG_1, ret); + } + + ret = net_eth_vlan_enable(interfaces->second_vlan, + CONFIG_SAMPLE_VLAN_TAG_2); + if (ret < 0) { + NET_ERR("Cannot enable VLAN for tag %d (%d)", + CONFIG_SAMPLE_VLAN_TAG_2, ret); + } + +#endif + + return ret; +} diff --git a/tests/net/checksum_offload/src/main.c b/tests/net/checksum_offload/src/main.c index ac2220adca016..fd447bf7b3da7 100644 --- a/tests/net/checksum_offload/src/main.c +++ b/tests/net/checksum_offload/src/main.c @@ -96,6 +96,8 @@ static void eth_iface_init(struct net_if *iface) DBG("Iface %p addr %s\n", iface, net_sprint_ll_addr(context->mac_addr, sizeof(context->mac_addr))); + + ethernet_init(iface); } static int eth_tx_offloading_disabled(struct net_if *iface, struct net_pkt *pkt) @@ -250,15 +252,17 @@ static int eth_init(struct device *dev) return 0; } -NET_DEVICE_INIT(eth_offloading_disabled_test, "eth_offloading_disabled_test", - eth_init, 
ð_context_offloading_disabled, - NULL, CONFIG_ETH_INIT_PRIORITY, &api_funcs_offloading_disabled, - ETHERNET_L2, NET_L2_GET_CTX_TYPE(ETHERNET_L2), 1500); - -NET_DEVICE_INIT(eth_offloading_enabled_test, "eth_offloading_enabled_test", - eth_init, ð_context_offloading_enabled, - NULL, CONFIG_ETH_INIT_PRIORITY, &api_funcs_offloading_enabled, - ETHERNET_L2, NET_L2_GET_CTX_TYPE(ETHERNET_L2), 1500); +ETH_NET_DEVICE_INIT(eth_offloading_disabled_test, + "eth_offloading_disabled_test", + eth_init, ð_context_offloading_disabled, + NULL, CONFIG_ETH_INIT_PRIORITY, + &api_funcs_offloading_disabled, 1500); + +ETH_NET_DEVICE_INIT(eth_offloading_enabled_test, + "eth_offloading_enabled_test", + eth_init, ð_context_offloading_enabled, + NULL, CONFIG_ETH_INIT_PRIORITY, + &api_funcs_offloading_enabled, 1500); struct user_data { int eth_if_count; diff --git a/tests/net/tx_timestamp/CMakeLists.txt b/tests/net/tx_timestamp/CMakeLists.txt new file mode 100644 index 0000000000000..46796e144cb7d --- /dev/null +++ b/tests/net/tx_timestamp/CMakeLists.txt @@ -0,0 +1,6 @@ +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(NONE) + +target_include_directories(app PRIVATE $ENV{ZEPHYR_BASE}/subsys/net/ip) +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/net/tx_timestamp/prj.conf b/tests/net/tx_timestamp/prj.conf new file mode 100644 index 0000000000000..48c6da2a90d52 --- /dev/null +++ b/tests/net/tx_timestamp/prj.conf @@ -0,0 +1,33 @@ +CONFIG_NETWORKING=y +CONFIG_NET_TEST=y +CONFIG_NET_IPV6=y +CONFIG_NET_UDP=y +CONFIG_NET_TCP=n +CONFIG_NET_IPV4=n +CONFIG_NET_MAX_CONTEXTS=4 +CONFIG_NET_L2_ETHERNET=y +CONFIG_NET_LOG=y +CONFIG_SYS_LOG_SHOW_COLOR=y +CONFIG_ENTROPY_GENERATOR=y +CONFIG_TEST_RANDOM_GENERATOR=y +CONFIG_NET_IPV6_DAD=n +CONFIG_NET_IPV6_MLD=n +CONFIG_NET_PKT_TX_COUNT=15 +CONFIG_NET_PKT_RX_COUNT=15 +CONFIG_NET_BUF_RX_COUNT=15 +CONFIG_NET_BUF_TX_COUNT=15 +CONFIG_NET_IF_MAX_IPV6_COUNT=6 
+CONFIG_NET_IF_UNICAST_IPV6_ADDR_COUNT=6 +CONFIG_NET_IPV6_ND=n +CONFIG_ZTEST=y +CONFIG_NET_APP=n +CONFIG_NET_APP_SETTINGS=n +CONFIG_NET_DEBUG_L2_ETHERNET=n +CONFIG_NET_DEBUG_CONTEXT=n +CONFIG_NET_DEBUG_IF=n +CONFIG_NET_DEBUG_CORE=n +CONFIG_NET_DEBUG_IPV6=n +CONFIG_NET_DEBUG_NET_PKT=y +CONFIG_SYS_LOG_NET_LEVEL=4 +CONFIG_NET_SHELL=n +CONFIG_NET_PKT_TIMESTAMP=y diff --git a/tests/net/tx_timestamp/src/main.c b/tests/net/tx_timestamp/src/main.c new file mode 100644 index 0000000000000..18cd169cddd25 --- /dev/null +++ b/tests/net/tx_timestamp/src/main.c @@ -0,0 +1,535 @@ +/* main.c - Application main entry point */ + +/* + * Copyright (c) 2018 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include "ipv6.h" + +#define NET_LOG_ENABLED 1 +#include "net_private.h" + +#if defined(CONFIG_NET_DEBUG_L2_ETHERNET) +#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__) +#else +#define DBG(fmt, ...) 
+#endif + +#define PORT 9999 + +static char *test_data = "Test data to be sent"; + +/* Interface 1 addresses */ +static struct in6_addr my_addr1 = { { { 0x20, 0x01, 0x0d, 0xb8, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0x1 } } }; + +/* Interface 2 addresses */ +static struct in6_addr my_addr2 = { { { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0x1 } } }; + +/* Destination address for test packets */ +static struct in6_addr dst_addr = { { { 0x20, 0x01, 0x0d, 0xb8, 9, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0x1 } } }; + +/* Extra address is assigned to ll_addr */ +static struct in6_addr ll_addr = { { { 0xfe, 0x80, 0x43, 0xb8, 0, 0, 0, 0, + 0, 0, 0, 0xf2, 0xaa, 0x29, 0x02, + 0x04 } } }; + +/* Keep track of all ethernet interfaces */ +static struct net_if *eth_interfaces[2]; + +static struct net_context *udp_v6_ctx; + +static bool test_failed; +static bool test_started; +static bool do_timestamp; +static bool timestamp_cb_called; +static struct net_if_timestamp_cb timestamp_cb; +static struct net_if_timestamp_cb timestamp_cb_2; +static struct net_if_timestamp_cb timestamp_cb_3; + +static K_SEM_DEFINE(wait_data, 0, UINT_MAX); + +#define WAIT_TIME K_SECONDS(1) + +struct eth_context { + struct net_if *iface; + u8_t mac_addr[6]; +}; + +static struct eth_context eth_context; +static struct eth_context eth_context2; + +static void eth_iface_init(struct net_if *iface) +{ + struct device *dev = net_if_get_device(iface); + struct eth_context *context = dev->driver_data; + + net_if_set_link_addr(iface, context->mac_addr, + sizeof(context->mac_addr), + NET_LINK_ETHERNET); + + ethernet_init(iface); +} + +static int eth_tx(struct net_if *iface, struct net_pkt *pkt) +{ + if (!pkt->frags) { + DBG("No data to send!\n"); + return -ENODATA; + } + + if (test_started) { + if (do_timestamp) { + /* Simulate the clock advancing */ + pkt->timestamp.nanosecond = pkt->timestamp.second + 1; + + net_if_add_tx_timestamp(pkt); + } else { + k_sem_give(&wait_data); + } + } + + net_pkt_unref(pkt); + 
test_started = false; + + return 0; +} + +static enum eth_hw_caps eth_get_capabilities(struct device *dev) +{ + return 0; +} + +static struct ethernet_api api_funcs = { + .iface_api.init = eth_iface_init, + .iface_api.send = eth_tx, + + .get_capabilities = eth_get_capabilities, +}; + +static void generate_mac(u8_t *mac_addr) +{ + /* 00-00-5E-00-53-xx Documentation RFC 7042 */ + mac_addr[0] = 0x00; + mac_addr[1] = 0x00; + mac_addr[2] = 0x5E; + mac_addr[3] = 0x00; + mac_addr[4] = 0x53; + mac_addr[5] = sys_rand32_get(); +} + +static int eth_init(struct device *dev) +{ + struct eth_context *context = dev->driver_data; + + generate_mac(context->mac_addr); + + return 0; +} + +ETH_NET_DEVICE_INIT(eth_test, "eth_test", eth_init, ð_context, + NULL, CONFIG_ETH_INIT_PRIORITY, &api_funcs, 1500); + +ETH_NET_DEVICE_INIT(eth_test2, "eth_test2", eth_init, ð_context2, + NULL, CONFIG_ETH_INIT_PRIORITY, &api_funcs, 1500); + +static void timestamp_callback(struct net_pkt *pkt) +{ + timestamp_cb_called = true; + + if (do_timestamp) { + /* This is very artificial test but make sure that we + * have advanced the time a bit. 
+ */ + zassert_true(pkt->timestamp.nanosecond > pkt->timestamp.second, + "Timestamp not working ok (%d < %d)\n", + pkt->timestamp.nanosecond, pkt->timestamp.second); + } + + /* The pkt was ref'ed in send_some_data()() */ + net_pkt_unref(pkt); + + if (do_timestamp) { + k_sem_give(&wait_data); + } +} + +static void timestamp_setup(void) +{ + struct net_if *iface; + struct net_pkt *pkt; + + iface = eth_interfaces[0]; + + net_if_register_timestamp_cb(×tamp_cb, iface, + timestamp_callback); + + timestamp_cb_called = false; + do_timestamp = false; + + pkt = net_pkt_get_reserve_tx(0, K_FOREVER); + net_pkt_set_iface(pkt, iface); + + /* Make sure that the callback function is called */ + net_if_call_timestamp_cb(pkt); + + zassert_true(timestamp_cb_called, "Timestamp callback not called\n"); + zassert_equal(pkt->ref, 0, "Pkt %p not released\n"); +} + +static void timestamp_callback_2(struct net_pkt *pkt) +{ + timestamp_cb_called = true; + + if (do_timestamp) { + /* This is very artificial test but make sure that we + * have advanced the time a bit. 
+ */ + zassert_true(pkt->timestamp.nanosecond > pkt->timestamp.second, + "Timestamp not working ok (%d < %d)\n", + pkt->timestamp.nanosecond, pkt->timestamp.second); + } + + zassert_equal(eth_interfaces[1], net_pkt_iface(pkt), + "Invalid interface"); + + /* The pkt was ref'ed in send_some_data()() */ + net_pkt_unref(pkt); + + if (do_timestamp) { + k_sem_give(&wait_data); + } +} + +static void timestamp_setup_2nd_iface(void) +{ + struct net_if *iface; + struct net_pkt *pkt; + + iface = eth_interfaces[1]; + + net_if_register_timestamp_cb(×tamp_cb_2, iface, + timestamp_callback_2); + + timestamp_cb_called = false; + do_timestamp = false; + + pkt = net_pkt_get_reserve_tx(0, K_FOREVER); + net_pkt_set_iface(pkt, iface); + + /* Make sure that the callback function is called */ + net_if_call_timestamp_cb(pkt); + + zassert_true(timestamp_cb_called, "Timestamp callback not called\n"); + zassert_equal(pkt->ref, 0, "Pkt %p not released\n"); +} + +static void timestamp_setup_all(void) +{ + struct net_pkt *pkt; + + net_if_register_timestamp_cb(×tamp_cb_3, NULL, + timestamp_callback); + + timestamp_cb_called = false; + do_timestamp = false; + + pkt = net_pkt_get_reserve_tx(0, K_FOREVER); + net_pkt_set_iface(pkt, eth_interfaces[0]); + + /* The callback is called twice because we have two matching callbacks + * as the interface is set to NULL when registering cb. So we need to + * ref the pkt here because the callback releases pkt. 
+ */ + net_pkt_ref(pkt); + + /* Make sure that the callback function is called */ + net_if_call_timestamp_cb(pkt); + + zassert_true(timestamp_cb_called, "Timestamp callback not called\n"); + zassert_equal(pkt->ref, 0, "Pkt %p not released\n"); + + net_if_unregister_timestamp_cb(×tamp_cb_3); +} + +static void timestamp_cleanup(void) +{ + struct net_if *iface; + struct net_pkt *pkt; + + net_if_unregister_timestamp_cb(×tamp_cb); + + iface = eth_interfaces[0]; + + timestamp_cb_called = false; + do_timestamp = false; + + pkt = net_pkt_get_reserve_tx(0, K_FOREVER); + net_pkt_set_iface(pkt, iface); + + /* Make sure that the callback function is not called after unregister + */ + net_if_call_timestamp_cb(pkt); + + zassert_false(timestamp_cb_called, "Timestamp callback called\n"); + zassert_false(pkt->ref < 1, "Pkt %p released\n"); + + net_pkt_unref(pkt); +} + +struct user_data { + int eth_if_count; + int total_if_count; +}; + +#if defined(CONFIG_NET_DEBUG_L2_ETHERNET) +static const char *iface2str(struct net_if *iface) +{ +#ifdef CONFIG_NET_L2_ETHERNET + if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) { + return "Ethernet"; + } +#endif + + return ""; +} +#endif + +static void iface_cb(struct net_if *iface, void *user_data) +{ + struct user_data *ud = user_data; + + DBG("Interface %p (%s) [%d]\n", iface, iface2str(iface), + net_if_get_by_iface(iface)); + + if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) { + eth_interfaces[ud->eth_if_count++] = iface; + } + + /* By default all interfaces are down initially */ + net_if_down(iface); + + ud->total_if_count++; +} + +static void address_setup(void) +{ + struct net_if_addr *ifaddr; + struct net_if *iface1, *iface2; + + struct user_data ud = { 0 }; + + net_if_foreach(iface_cb, &ud); + + iface1 = eth_interfaces[0]; + iface2 = eth_interfaces[1]; + + zassert_not_null(iface1, "Interface 1\n"); + zassert_not_null(iface2, "Interface 2\n"); + + ifaddr = net_if_ipv6_addr_add(iface1, &my_addr1, + NET_ADDR_MANUAL, 0); + if (!ifaddr) 
{ + DBG("Cannot add IPv6 address %s\n", + net_sprint_ipv6_addr(&my_addr1)); + zassert_not_null(ifaddr, "addr1\n"); + } + + /* For testing purposes we need to set the adddresses preferred */ + ifaddr->addr_state = NET_ADDR_PREFERRED; + + ifaddr = net_if_ipv6_addr_add(iface1, &ll_addr, + NET_ADDR_MANUAL, 0); + if (!ifaddr) { + DBG("Cannot add IPv6 address %s\n", + net_sprint_ipv6_addr(&ll_addr)); + zassert_not_null(ifaddr, "ll_addr\n"); + } + + ifaddr->addr_state = NET_ADDR_PREFERRED; + + ifaddr = net_if_ipv6_addr_add(iface2, &my_addr2, + NET_ADDR_MANUAL, 0); + if (!ifaddr) { + DBG("Cannot add IPv6 address %s\n", + net_sprint_ipv6_addr(&my_addr2)); + zassert_not_null(ifaddr, "addr2\n"); + } + + ifaddr->addr_state = NET_ADDR_PREFERRED; + + net_if_up(iface1); + net_if_up(iface2); + + /* The interface might receive data which might fail the checks + * in the iface sending function, so we need to reset the failure + * flag. + */ + test_failed = false; +} + +static bool add_neighbor(struct net_if *iface, struct in6_addr *addr) +{ + struct net_linkaddr_storage llstorage; + struct net_linkaddr lladdr; + struct net_nbr *nbr; + + llstorage.addr[0] = 0x01; + llstorage.addr[1] = 0x02; + llstorage.addr[2] = 0x33; + llstorage.addr[3] = 0x44; + llstorage.addr[4] = 0x05; + llstorage.addr[5] = 0x06; + + lladdr.len = 6; + lladdr.addr = llstorage.addr; + lladdr.type = NET_LINK_ETHERNET; + + nbr = net_ipv6_nbr_add(iface, addr, &lladdr, false, + NET_IPV6_NBR_STATE_REACHABLE); + if (!nbr) { + DBG("Cannot add dst %s to neighbor cache\n", + net_sprint_ipv6_addr(addr)); + return false; + } + + return true; +} + +static struct net_pkt *send_some_data(struct net_if *iface, bool ref_pkt) +{ + struct net_pkt *pkt; + struct net_buf *frag; + int ret, len; + struct sockaddr_in6 dst_addr6 = { + .sin6_family = AF_INET6, + .sin6_port = htons(PORT), + }; + struct sockaddr_in6 src_addr6 = { + .sin6_family = AF_INET6, + .sin6_port = 0, + }; + + ret = net_context_get(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, + 
&udp_v6_ctx); + zassert_equal(ret, 0, "Create IPv6 UDP context failed\n"); + + memcpy(&src_addr6.sin6_addr, &my_addr1, sizeof(struct in6_addr)); + memcpy(&dst_addr6.sin6_addr, &dst_addr, sizeof(struct in6_addr)); + + ret = net_context_bind(udp_v6_ctx, (struct sockaddr *)&src_addr6, + sizeof(struct sockaddr_in6)); + zassert_equal(ret, 0, "Context bind failure test failed\n"); + + pkt = net_pkt_get_tx(udp_v6_ctx, K_FOREVER); + zassert_not_null(pkt, "Cannot get pkt\n"); + frag = net_pkt_get_data(udp_v6_ctx, K_FOREVER); + zassert_not_null(frag, "Cannot get frag\n"); + net_pkt_frag_add(pkt, frag); + + len = strlen(test_data); + memcpy(net_buf_add(frag, len), test_data, len); + net_pkt_set_appdatalen(pkt, len); + + ret = add_neighbor(iface, &dst_addr); + zassert_true(ret, "Cannot add neighbor\n"); + + if (ref_pkt) { + /* As the Tx function will release the pkt, try to ref it + * before sending. + */ + net_pkt_ref(pkt); + } + + pkt->timestamp.nanosecond = 0; + pkt->timestamp.second = k_cycle_get_32(); + + ret = net_context_sendto(pkt, (struct sockaddr *)&dst_addr6, + sizeof(struct sockaddr_in6), + NULL, 0, NULL, NULL); + zassert_equal(ret, 0, "Send UDP pkt failed\n"); + + net_context_unref(udp_v6_ctx); + + return pkt; +} + +static void check_timestamp_before_enabling(void) +{ + struct net_pkt *pkt; + + test_started = true; + do_timestamp = false; + + pkt = send_some_data(eth_interfaces[0], false); + + if (k_sem_take(&wait_data, WAIT_TIME)) { + DBG("Timeout while waiting interface data\n"); + zassert_false(true, "Timeout\n"); + } + + /* As there was no TX timestamp handler defined, the eth_tx() + * should have unreffed the packet by now so the ref count + * should be zero now. 
+ */ + zassert_equal(pkt->ref, 0, "packet %p was not released (ref %d)\n", + pkt, pkt->ref); +} + +static void check_timestamp_after_enabling(void) +{ + struct net_pkt *pkt; + + test_started = true; + do_timestamp = true; + + pkt = send_some_data(eth_interfaces[0], true); + + if (k_sem_take(&wait_data, WAIT_TIME)) { + DBG("Timeout while waiting interface data\n"); + zassert_false(true, "Timeout\n"); + } + + /* As there is a TX timestamp handler defined, the eth_tx() + * and timestamp_cb() should have unreffed the packet by now so + * the ref count should be zero at this point. + */ + zassert_equal(pkt->ref, 0, "packet %p was not released (ref %d)\n", + pkt, pkt->ref); +} + +void test_main(void) +{ + ztest_test_suite(net_tx_timestamp_test, + ztest_unit_test(address_setup), + ztest_unit_test(check_timestamp_before_enabling), + ztest_unit_test(timestamp_setup), + ztest_unit_test(timestamp_setup_2nd_iface), + ztest_unit_test(timestamp_setup_all), + ztest_unit_test(check_timestamp_after_enabling), + ztest_unit_test(timestamp_cleanup) + ); + + ztest_run_test_suite(net_tx_timestamp_test); +} diff --git a/tests/net/tx_timestamp/testcase.yaml b/tests/net/tx_timestamp/testcase.yaml new file mode 100644 index 0000000000000..297dded455ce6 --- /dev/null +++ b/tests/net/tx_timestamp/testcase.yaml @@ -0,0 +1,5 @@ +tests: + test: + min_ram: 16 + tags: net tx_timestamp gptp + depends_on: netif diff --git a/tests/net/vlan/CMakeLists.txt b/tests/net/vlan/CMakeLists.txt new file mode 100644 index 0000000000000..46796e144cb7d --- /dev/null +++ b/tests/net/vlan/CMakeLists.txt @@ -0,0 +1,6 @@ +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(NONE) + +target_include_directories(app PRIVATE $ENV{ZEPHYR_BASE}/subsys/net/ip) +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/net/vlan/prj.conf b/tests/net/vlan/prj.conf new file mode 100644 index 0000000000000..12e3256bf4a37 --- /dev/null +++ b/tests/net/vlan/prj.conf 
@@ -0,0 +1,37 @@ +CONFIG_NETWORKING=y +CONFIG_NET_TEST=y +CONFIG_NET_IPV6=y +CONFIG_NET_UDP=y +CONFIG_NET_TCP=n +CONFIG_NET_IPV4=n +CONFIG_NET_MAX_CONTEXTS=4 +CONFIG_NET_L2_ETHERNET=y +CONFIG_NET_L2_DUMMY=y +CONFIG_NET_LOG=y +CONFIG_SYS_LOG_SHOW_COLOR=y +CONFIG_ENTROPY_GENERATOR=y +CONFIG_TEST_RANDOM_GENERATOR=y +CONFIG_NET_IPV6_DAD=n +CONFIG_NET_IPV6_MLD=n +CONFIG_NET_PKT_TX_COUNT=15 +CONFIG_NET_PKT_RX_COUNT=15 +CONFIG_NET_BUF_RX_COUNT=15 +CONFIG_NET_BUF_TX_COUNT=15 +CONFIG_NET_IF_MAX_IPV6_COUNT=6 +CONFIG_NET_IF_UNICAST_IPV6_ADDR_COUNT=6 +CONFIG_NET_IPV6_ND=n +CONFIG_NET_VLAN=y +CONFIG_NET_VLAN_COUNT=4 +CONFIG_NET_CONTEXT_PRIORITY=y +CONFIG_ZTEST=y +CONFIG_NET_APP=n +CONFIG_NET_APP_SETTINGS=n +CONFIG_NET_DEBUG_L2_ETHERNET=n +CONFIG_NET_DEBUG_CONTEXT=n +CONFIG_NET_DEBUG_IF=n +CONFIG_NET_DEBUG_CORE=n +CONFIG_NET_DEBUG_IPV6=n +CONFIG_NET_DEBUG_NET_PKT=y +CONFIG_SYS_LOG_NET_LEVEL=4 +CONFIG_NET_SHELL=n +CONFIG_ETH_NATIVE_POSIX=n diff --git a/tests/net/vlan/src/main.c b/tests/net/vlan/src/main.c new file mode 100644 index 0000000000000..143f3c435b87d --- /dev/null +++ b/tests/net/vlan/src/main.c @@ -0,0 +1,790 @@ +/* main.c - Application main entry point */ + +/* + * Copyright (c) 2018 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include "ipv6.h" + +#define NET_LOG_ENABLED 1 +#include "net_private.h" + +#if defined(CONFIG_NET_DEBUG_L2_ETHERNET) +#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__) +#else +#define DBG(fmt, ...) 
+#endif + +#define PORT 9999 + +#define VLAN_TAG_1 100 +#define VLAN_TAG_2 200 +#define VLAN_TAG_3 300 +#define VLAN_TAG_4 400 +#define VLAN_TAG_5 500 + +static char *test_data = "Test data to be sent"; + +/* Interface 1 addresses */ +static struct in6_addr my_addr1 = { { { 0x20, 0x01, 0x0d, 0xb8, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0x1 } } }; + +/* Interface 2 addresses */ +static struct in6_addr my_addr2 = { { { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0x1 } } }; + +/* Interface 3 addresses */ +static struct in6_addr my_addr3 = { { { 0x20, 0x01, 0x0d, 0xb8, 2, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0x1 } } }; + +/* Destination address for test packets */ +static struct in6_addr dst_addr = { { { 0x20, 0x01, 0x0d, 0xb8, 9, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0x1 } } }; + +/* Extra address is assigned to ll_addr */ +static struct in6_addr ll_addr = { { { 0xfe, 0x80, 0x43, 0xb8, 0, 0, 0, 0, + 0, 0, 0, 0xf2, 0xaa, 0x29, 0x02, + 0x04 } } }; + +/* Keep track of all ethernet interfaces */ +static struct net_if *eth_interfaces[NET_VLAN_MAX_COUNT + 1]; +static struct net_if *dummy_interfaces[2]; +static struct net_if *extra_eth; + +static struct net_context *udp_v6_ctx; + +static bool test_failed; +static bool test_started; + +static K_SEM_DEFINE(wait_data, 0, UINT_MAX); + +#define WAIT_TIME K_SECONDS(1) + +struct eth_context { + struct net_if *iface; + u8_t mac_addr[6]; + + u16_t expecting_tag; +}; + +static struct eth_context eth_vlan_context; + +static void eth_vlan_iface_init(struct net_if *iface) +{ + struct device *dev = net_if_get_device(iface); + struct eth_context *context = dev->driver_data; + + net_if_set_link_addr(iface, context->mac_addr, + sizeof(context->mac_addr), + NET_LINK_ETHERNET); + + ethernet_init(iface); +} + +static int eth_tx(struct net_if *iface, struct net_pkt *pkt) +{ + struct eth_context *context = net_if_get_device(iface)->driver_data; + + zassert_equal_ptr(ð_vlan_context, context, + "Context pointers do not match (%p vs %p)", + 
eth_vlan_context, context); + + if (!pkt->frags) { + DBG("No data to send!\n"); + return -ENODATA; + } + + if (test_started) { + struct net_eth_vlan_hdr *hdr = + (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); + + zassert_equal(context->expecting_tag, + net_pkt_vlan_tag(pkt), + "Invalid VLAN tag (%d vs %d) in TX pkt\n", + net_pkt_vlan_tag(pkt), + context->expecting_tag); + + zassert_equal(context->expecting_tag, + net_eth_get_vid(ntohs(hdr->vlan.tci)), + "Invalid VLAN tag in ethernet header\n"); + + k_sem_give(&wait_data); + } + + net_pkt_unref(pkt); + + return 0; +} + +static struct ethernet_api api_funcs = { + .iface_api.init = eth_vlan_iface_init, + .iface_api.send = eth_tx, +}; + +static void generate_mac(u8_t *mac_addr) +{ + /* 00-00-5E-00-53-xx Documentation RFC 7042 */ + mac_addr[0] = 0x00; + mac_addr[1] = 0x00; + mac_addr[2] = 0x5E; + mac_addr[3] = 0x00; + mac_addr[4] = 0x53; + mac_addr[5] = sys_rand32_get(); +} + +static int eth_vlan_init(struct device *dev) +{ + struct eth_context *context = dev->driver_data; + + generate_mac(context->mac_addr); + + return 0; +} + +ETH_NET_DEVICE_INIT(eth_vlan_test, "eth_vlan_test", eth_vlan_init, + ð_vlan_context, NULL, CONFIG_ETH_INIT_PRIORITY, + &api_funcs, 1500); + +static int eth_init(struct device *dev) +{ + struct eth_context *context = dev->driver_data; + + generate_mac(context->mac_addr); + + return 0; +} + +/* Create one ethernet interface that does not have VLAN support. This + * is quite unlikely that this would be done in real life but for testing + * purposes create it here. 
+ */ +NET_DEVICE_INIT(eth_test, "eth_test", eth_init, ð_vlan_context, + NULL, CONFIG_ETH_INIT_PRIORITY, &api_funcs, + ETHERNET_L2, NET_L2_GET_CTX_TYPE(ETHERNET_L2), 1500); + +struct net_if_test { + u8_t idx; /* not used for anything, just a dummy value */ + u8_t mac_addr[sizeof(struct net_eth_addr)]; + struct net_linkaddr ll_addr; +}; + +static int net_iface_dev_init(struct device *dev) +{ + return 0; +} + +static u8_t *net_iface_get_mac(struct device *dev) +{ + struct net_if_test *data = dev->driver_data; + + if (data->mac_addr[2] == 0x00) { + /* 00-00-5E-00-53-xx Documentation RFC 7042 */ + data->mac_addr[0] = 0x00; + data->mac_addr[1] = 0x00; + data->mac_addr[2] = 0x5E; + data->mac_addr[3] = 0x00; + data->mac_addr[4] = 0x53; + data->mac_addr[5] = sys_rand32_get(); + } + + data->ll_addr.addr = data->mac_addr; + data->ll_addr.len = 6; + + return data->mac_addr; +} + +static void net_iface_init(struct net_if *iface) +{ + u8_t *mac = net_iface_get_mac(net_if_get_device(iface)); + + net_if_set_link_addr(iface, mac, sizeof(struct net_eth_addr), + NET_LINK_ETHERNET); +} + +static int sender_iface(struct net_if *iface, struct net_pkt *pkt) +{ + net_pkt_unref(pkt); + + return 0; +} + +struct net_if_test net_iface1_data; +struct net_if_test net_iface2_data; + +static struct net_if_api net_iface_api = { + .init = net_iface_init, + .send = sender_iface, +}; + +/* For testing purposes, create two dummy network interfaces so we can check + * that no VLANs are created for it. 
+ */ +NET_DEVICE_INIT_INSTANCE(net_iface1_test, + "iface1", + iface1, + net_iface_dev_init, + &net_iface1_data, + NULL, + CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, + &net_iface_api, + DUMMY_L2, + NET_L2_GET_CTX_TYPE(DUMMY_L2), + 127); + +NET_DEVICE_INIT_INSTANCE(net_iface2_test, + "iface2", + iface2, + net_iface_dev_init, + &net_iface2_data, + NULL, + CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, + &net_iface_api, + DUMMY_L2, + NET_L2_GET_CTX_TYPE(DUMMY_L2), + 127); + +struct user_data { + int eth_if_count; + int dummy_if_count; + int total_if_count; +}; + +#if defined(CONFIG_NET_DEBUG_L2_ETHERNET) +static const char *iface2str(struct net_if *iface) +{ +#ifdef CONFIG_NET_L2_ETHERNET + if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) { + return "Ethernet"; + } +#endif + +#ifdef CONFIG_NET_L2_DUMMY + if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) { + return "Dummy"; + } +#endif + + return ""; +} +#endif + +static void iface_cb(struct net_if *iface, void *user_data) +{ + struct user_data *ud = user_data; + + DBG("Interface %p (%s) [%d]\n", iface, iface2str(iface), + net_if_get_by_iface(iface)); + + if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) { + if (PART_OF_ARRAY(NET_IF_GET_NAME(eth_test, 0), iface)) { + if (!extra_eth) { + /* Just use the first interface */ + extra_eth = iface; + } + } else { + eth_interfaces[ud->eth_if_count++] = iface; + } + } + + if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) { + dummy_interfaces[ud->dummy_if_count++] = iface; + + zassert_true(ud->dummy_if_count <= 2, + "Too many dummy interfaces\n"); + } + + /* By default all interfaces are down initially */ + net_if_down(iface); + + ud->total_if_count++; +} + +static void vlan_setup(void) +{ + struct user_data ud = { 0 }; + + /* Make sure we have enough virtual interfaces */ + net_if_foreach(iface_cb, &ud); + + /* One extra eth interface without vlan support */ + zassert_equal(ud.eth_if_count, NET_VLAN_MAX_COUNT, + "Invalid numer of VLANs %d vs %d\n", + ud.eth_if_count, NET_VLAN_MAX_COUNT); + + 
zassert_equal(ud.total_if_count, NET_VLAN_MAX_COUNT + 1 + 2, + "Invalid numer of interfaces\n"); + + /* Put the extra non-vlan ethernet interface to last */ + eth_interfaces[4] = extra_eth; + zassert_not_null(extra_eth, "Extra interface missing\n"); + zassert_equal_ptr(net_if_l2(extra_eth), &NET_L2_GET_NAME(ETHERNET), + "Invalid L2 type %p for iface %p (should be %p)\n", + net_if_l2(extra_eth), extra_eth, + &NET_L2_GET_NAME(ETHERNET)); +} + +static void address_setup(void) +{ + struct net_if_addr *ifaddr; + struct net_if *iface1, *iface2, *iface3; + + iface1 = eth_interfaces[1]; /* This has VLAN enabled */ + iface2 = eth_interfaces[0]; /* and this one not */ + iface3 = eth_interfaces[3]; /* and this one has VLAN enabled */ + + zassert_not_null(iface1, "Interface 1\n"); + zassert_not_null(iface2, "Interface 2\n"); + zassert_not_null(iface3, "Interface 3\n"); + + ifaddr = net_if_ipv6_addr_add(iface1, &my_addr1, + NET_ADDR_MANUAL, 0); + if (!ifaddr) { + DBG("Cannot add IPv6 address %s\n", + net_sprint_ipv6_addr(&my_addr1)); + zassert_not_null(ifaddr, "addr1\n"); + } + + /* For testing purposes we need to set the addresses preferred */ + ifaddr->addr_state = NET_ADDR_PREFERRED; + + ifaddr = net_if_ipv6_addr_add(iface1, &ll_addr, + NET_ADDR_MANUAL, 0); + if (!ifaddr) { + DBG("Cannot add IPv6 address %s\n", + net_sprint_ipv6_addr(&ll_addr)); + zassert_not_null(ifaddr, "ll_addr\n"); + } + + ifaddr->addr_state = NET_ADDR_PREFERRED; + + ifaddr = net_if_ipv6_addr_add(iface2, &my_addr2, + NET_ADDR_MANUAL, 0); + if (!ifaddr) { + DBG("Cannot add IPv6 address %s\n", + net_sprint_ipv6_addr(&my_addr2)); + zassert_not_null(ifaddr, "addr2\n"); + } + + ifaddr->addr_state = NET_ADDR_PREFERRED; + + ifaddr = net_if_ipv6_addr_add(iface3, &my_addr3, + NET_ADDR_MANUAL, 0); + if (!ifaddr) { + DBG("Cannot add IPv6 address %s\n", + net_sprint_ipv6_addr(&my_addr3)); + zassert_not_null(ifaddr, "addr3\n"); + } + + net_if_up(iface1); + net_if_up(iface2); + net_if_up(iface3); + + /* The interface 
might receive data which might fail the checks + * in the iface sending function, so we need to reset the failure + * flag. + */ + test_failed = false; +} + +static void vlan_tci_test(void) +{ + struct net_pkt *pkt; + u16_t tci; + u16_t tag; + u8_t priority; + bool dei; + + pkt = net_pkt_get_reserve_tx(0, K_FOREVER); + + tag = NET_VLAN_TAG_UNSPEC; + net_pkt_set_vlan_tag(pkt, tag); + + priority = 0; + net_pkt_set_vlan_priority(pkt, priority); + + zassert_equal(net_pkt_vlan_tag(pkt), NET_VLAN_TAG_UNSPEC, + "invalid VLAN tag unspec\n"); + zassert_equal(net_pkt_vlan_priority(pkt), priority, + "invalid VLAN priority\n"); + + net_pkt_set_vlan_tag(pkt, 0); + zassert_equal(net_pkt_vlan_tag(pkt), 0, "invalid VLAN tag\n"); + + /* TCI should be zero now */ + zassert_equal(net_pkt_vlan_tci(pkt), 0, "invalid VLAN TCI\n"); + + priority = 1; + net_pkt_set_vlan_priority(pkt, priority); + + zassert_equal(net_pkt_vlan_priority(pkt), priority, + "invalid VLAN priority\n"); + + net_pkt_set_vlan_tag(pkt, tag); + + zassert_equal(net_pkt_vlan_tag(pkt), NET_VLAN_TAG_UNSPEC, + "invalid VLAN tag unspec\n"); + + zassert_equal(net_pkt_vlan_priority(pkt), priority, + "invalid VLAN priority\n"); + + net_pkt_set_vlan_tag(pkt, 0); + zassert_equal(net_pkt_vlan_priority(pkt), priority, + "invalid VLAN priority\n"); + + dei = true; + net_pkt_set_vlan_dei(pkt, dei); + + zassert_equal(net_pkt_vlan_dei(pkt), dei, "invalid VLAN DEI\n"); + zassert_equal(net_pkt_vlan_priority(pkt), priority, + "invalid VLAN priority\n"); + zassert_equal(net_pkt_vlan_tag(pkt), 0, "invalid VLAN tag\n"); + + net_pkt_set_vlan_tag(pkt, tag); + zassert_equal(net_pkt_vlan_tag(pkt), tag, "invalid VLAN tag\n"); + zassert_equal(net_pkt_vlan_dei(pkt), dei, "invalid VLAN DEI\n"); + zassert_equal(net_pkt_vlan_priority(pkt), priority, + "invalid VLAN priority\n"); + + dei = false; + net_pkt_set_vlan_dei(pkt, dei); + zassert_equal(net_pkt_vlan_tag(pkt), tag, "invalid VLAN tag\n"); + zassert_equal(net_pkt_vlan_dei(pkt), dei, "invalid 
VLAN DEI\n"); + zassert_equal(net_pkt_vlan_priority(pkt), priority, + "invalid VLAN priority\n"); + + tag = 0; + net_pkt_set_vlan_tag(pkt, tag); + zassert_equal(net_pkt_vlan_tag(pkt), tag, "invalid VLAN tag\n"); + zassert_equal(net_pkt_vlan_dei(pkt), dei, "invalid VLAN DEI\n"); + zassert_equal(net_pkt_vlan_priority(pkt), priority, + "invalid VLAN priority\n"); + + priority = 0; + net_pkt_set_vlan_priority(pkt, priority); + zassert_equal(net_pkt_vlan_tag(pkt), tag, "invalid VLAN tag\n"); + zassert_equal(net_pkt_vlan_dei(pkt), dei, "invalid VLAN DEI\n"); + zassert_equal(net_pkt_vlan_priority(pkt), priority, + "invalid VLAN priority\n"); + + zassert_equal(net_pkt_vlan_tci(pkt), 0, "invalid VLAN TCI\n"); + + tci = 0; + tag = 100; + priority = 3; + + tci = net_eth_set_vid(tci, tag); + tci = net_eth_set_pcp(tci, priority); + + zassert_equal(tag, net_eth_get_vid(tci), "Invalid VLAN tag\n"); + zassert_equal(priority, net_eth_get_pcp(tci), + "Invalid VLAN priority\n"); +} + +/* Enable two VLAN tags and verify that proper interfaces are enabled. 
+ */ +static void vlan_enable_test(void) +{ + struct ethernet_context *eth_ctx; + struct net_if *iface; + int ret; + + ret = net_eth_vlan_enable(eth_interfaces[1], VLAN_TAG_1); + zassert_equal(ret, 0, "Cannot enable %d (%d)\n", VLAN_TAG_1, ret); + ret = net_eth_vlan_enable(eth_interfaces[3], VLAN_TAG_2); + zassert_equal(ret, 0, "Cannot enable %d (%d)\n", VLAN_TAG_2, ret); + + eth_ctx = net_if_l2_data(eth_interfaces[0]); + + iface = net_eth_get_vlan_iface(eth_ctx, VLAN_TAG_1); + zassert_equal_ptr(iface, eth_interfaces[1], + "Invalid interface for tag %d (%p vs %p)\n", + VLAN_TAG_1, iface, eth_interfaces[1]); + + iface = net_eth_get_vlan_iface(eth_ctx, VLAN_TAG_2); + zassert_equal_ptr(iface, eth_interfaces[3], + "Invalid interface for tag %d (%p vs %p)\n", + VLAN_TAG_2, iface, eth_interfaces[3]); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[0]); + zassert_equal(ret, false, "VLAN enabled for interface 0\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[1]); + zassert_equal(ret, true, "VLAN disabled for interface 1\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[2]); + zassert_equal(ret, false, "VLAN enabled for interface 2\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[3]); + zassert_equal(ret, true, "VLAN disabled for interface 3\n"); + + iface = eth_interfaces[0]; + ret = net_eth_vlan_enable(iface, NET_VLAN_TAG_UNSPEC); + zassert_equal(ret, -EBADF, "Invalid VLAN tag value %d\n", ret); + + iface = eth_interfaces[1]; + ret = net_eth_vlan_enable(iface, VLAN_TAG_1); + zassert_equal(ret, -EALREADY, "VLAN tag %d enabled for iface 1\n", + VLAN_TAG_1); +} + +static void vlan_disable_test(void) +{ + struct ethernet_context *eth_ctx; + struct net_if *iface; + int ret; + + ret = net_eth_vlan_disable(eth_interfaces[1], VLAN_TAG_1); + zassert_equal(ret, 0, "Cannot disable %d (%d)\n", VLAN_TAG_1, ret); + ret = net_eth_vlan_disable(eth_interfaces[3], VLAN_TAG_2); + zassert_equal(ret, 0, "Cannot disable %d (%d)\n", 
VLAN_TAG_2, ret); + + eth_ctx = net_if_l2_data(eth_interfaces[0]); + + iface = net_eth_get_vlan_iface(eth_ctx, VLAN_TAG_1); + zassert_equal_ptr(iface, NULL, + "Invalid interface for tag %d (%p vs %p)\n", + VLAN_TAG_1, iface, NULL); + + iface = net_eth_get_vlan_iface(eth_ctx, VLAN_TAG_2); + zassert_equal_ptr(iface, NULL, + "Invalid interface for tag %d (%p vs %p)\n", + VLAN_TAG_2, iface, NULL); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[0]); + zassert_equal(ret, false, "VLAN enabled for interface 0\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[1]); + zassert_equal(ret, false, "VLAN enabled for interface 1\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[2]); + zassert_equal(ret, false, "VLAN enabled for interface 2\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[3]); + zassert_equal(ret, false, "VLAN enabled for interface 3\n"); + + iface = eth_interfaces[0]; + ret = net_eth_vlan_disable(iface, NET_VLAN_TAG_UNSPEC); + zassert_equal(ret, -EBADF, "Invalid VLAN tag value %d\n", ret); + + iface = eth_interfaces[1]; + ret = net_eth_vlan_disable(iface, VLAN_TAG_1); + zassert_equal(ret, -ESRCH, "VLAN tag %d disabled for iface 1\n", + VLAN_TAG_1); +} + +static void vlan_enable_all_test(void) +{ + struct ethernet_context *eth_ctx; + struct net_if *iface; + int ret; + + ret = net_eth_vlan_enable(eth_interfaces[0], VLAN_TAG_1); + zassert_equal(ret, 0, "Cannot enable %d\n", VLAN_TAG_1); + ret = net_eth_vlan_enable(eth_interfaces[1], VLAN_TAG_2); + zassert_equal(ret, 0, "Cannot enable %d\n", VLAN_TAG_2); + ret = net_eth_vlan_enable(eth_interfaces[2], VLAN_TAG_3); + zassert_equal(ret, 0, "Cannot enable %d\n", VLAN_TAG_3); + ret = net_eth_vlan_enable(eth_interfaces[3], VLAN_TAG_4); + zassert_equal(ret, 0, "Cannot enable %d\n", VLAN_TAG_4); + + eth_ctx = net_if_l2_data(eth_interfaces[0]); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[0]); + zassert_equal(ret, true, "VLAN disabled for interface 0\n"); + + ret = 
net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[1]); + zassert_equal(ret, true, "VLAN disabled for interface 1\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[2]); + zassert_equal(ret, true, "VLAN disabled for interface 2\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[3]); + zassert_equal(ret, true, "VLAN disabled for interface 3\n"); + + iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY)); + zassert_not_null(iface, "No dummy iface found\n"); + + zassert_equal(net_if_l2(iface), &NET_L2_GET_NAME(DUMMY), + "Not a dummy interface\n"); + + ret = net_eth_vlan_enable(iface, VLAN_TAG_5); + zassert_equal(ret, -EINVAL, "Wrong iface type (%d)\n", ret); +} + +static void vlan_disable_all_test(void) +{ + struct ethernet_context *eth_ctx; + struct net_if *iface; + int ret; + + ret = net_eth_vlan_disable(eth_interfaces[0], VLAN_TAG_1); + zassert_equal(ret, 0, "Cannot disable %d\n", VLAN_TAG_1); + ret = net_eth_vlan_disable(eth_interfaces[1], VLAN_TAG_2); + zassert_equal(ret, 0, "Cannot disable %d\n", VLAN_TAG_2); + ret = net_eth_vlan_disable(eth_interfaces[2], VLAN_TAG_3); + zassert_equal(ret, 0, "Cannot disable %d\n", VLAN_TAG_3); + ret = net_eth_vlan_disable(eth_interfaces[3], VLAN_TAG_4); + zassert_equal(ret, 0, "Cannot disable %d\n", VLAN_TAG_4); + + eth_ctx = net_if_l2_data(eth_interfaces[0]); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[0]); + zassert_equal(ret, false, "VLAN enabled for interface 0\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[1]); + zassert_equal(ret, false, "VLAN enabled for interface 1\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[2]); + zassert_equal(ret, false, "VLAN enabled for interface 2\n"); + + ret = net_eth_is_vlan_enabled(eth_ctx, eth_interfaces[3]); + zassert_equal(ret, false, "VLAN enabled for interface 3\n"); + + iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY)); + zassert_not_null(iface, "No dummy iface found\n"); + + zassert_equal(net_if_l2(iface), 
&NET_L2_GET_NAME(DUMMY), + "Not a dummy interface\n"); + + ret = net_eth_vlan_disable(iface, VLAN_TAG_5); + zassert_equal(ret, -EINVAL, "Wrong iface type (%d)\n", ret); +} + +static bool add_neighbor(struct net_if *iface, struct in6_addr *addr) +{ + struct net_linkaddr_storage llstorage; + struct net_linkaddr lladdr; + struct net_nbr *nbr; + + llstorage.addr[0] = 0x01; + llstorage.addr[1] = 0x02; + llstorage.addr[2] = 0x33; + llstorage.addr[3] = 0x44; + llstorage.addr[4] = 0x05; + llstorage.addr[5] = 0x06; + + lladdr.len = 6; + lladdr.addr = llstorage.addr; + lladdr.type = NET_LINK_ETHERNET; + + nbr = net_ipv6_nbr_add(iface, addr, &lladdr, false, + NET_IPV6_NBR_STATE_REACHABLE); + if (!nbr) { + DBG("Cannot add dst %s to neighbor cache\n", + net_sprint_ipv6_addr(addr)); + return false; + } + + return true; +} + +static void vlan_send_data_test(void) +{ + struct ethernet_context *eth_ctx; /* This is L2 context */ + struct eth_context *ctx; /* This is interface context */ + struct net_if *iface; + struct net_pkt *pkt; + struct net_buf *frag; + int ret, len; + struct sockaddr_in6 dst_addr6 = { + .sin6_family = AF_INET6, + .sin6_port = htons(PORT), + }; + struct sockaddr_in6 src_addr6 = { + .sin6_family = AF_INET6, + .sin6_port = 0, + }; + + /* Setup the interfaces */ + vlan_enable_test(); + + ret = net_context_get(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, + &udp_v6_ctx); + zassert_equal(ret, 0, "Create IPv6 UDP context failed\n"); + + memcpy(&src_addr6.sin6_addr, &my_addr1, sizeof(struct in6_addr)); + memcpy(&dst_addr6.sin6_addr, &dst_addr, sizeof(struct in6_addr)); + + ret = net_context_bind(udp_v6_ctx, (struct sockaddr *)&src_addr6, + sizeof(struct sockaddr_in6)); + zassert_equal(ret, 0, "Context bind failure test failed\n"); + + iface = eth_interfaces[1]; /* This is the VLAN interface */ + ctx = net_if_get_device(iface)->driver_data; + eth_ctx = net_if_l2_data(iface); + ret = net_eth_is_vlan_enabled(eth_ctx, iface); + zassert_equal(ret, true, "VLAN disabled for interface 
1\n"); + + ctx->expecting_tag = VLAN_TAG_1; + + iface = eth_interfaces[3]; /* This is also VLAN interface */ + ctx = net_if_get_device(iface)->driver_data; + eth_ctx = net_if_l2_data(iface); + ret = net_eth_is_vlan_enabled(eth_ctx, iface); + zassert_equal(ret, true, "VLAN disabled for interface 1\n"); + + pkt = net_pkt_get_tx(udp_v6_ctx, K_FOREVER); + zassert_not_null(pkt, "Cannot get pkt\n"); + frag = net_pkt_get_data(udp_v6_ctx, K_FOREVER); + zassert_not_null(frag, "Cannot get frag\n"); + net_pkt_frag_add(pkt, frag); + + /* VLAN tag will be automatically set by ethernet L2 driver + * so we do not need to set it here. + */ + + len = strlen(test_data); + memcpy(net_buf_add(frag, len), test_data, len); + net_pkt_set_appdatalen(pkt, len); + + test_started = true; + + ret = add_neighbor(iface, &dst_addr); + zassert_true(ret, "Cannot add neighbor\n"); + + ret = net_context_sendto(pkt, (struct sockaddr *)&dst_addr6, + sizeof(struct sockaddr_in6), + NULL, 0, NULL, NULL); + zassert_equal(ret, 0, "Send UDP pkt failed\n"); + + if (k_sem_take(&wait_data, WAIT_TIME)) { + DBG("Timeout while waiting interface data\n"); + zassert_false(true, "Timeout\n"); + } + + net_context_unref(udp_v6_ctx); +} + +void test_main(void) +{ + ztest_test_suite(net_vlan_test, + ztest_unit_test(vlan_setup), + ztest_unit_test(address_setup), + ztest_unit_test(vlan_tci_test), + ztest_unit_test(vlan_enable_test), + ztest_unit_test(vlan_disable_test), + ztest_unit_test(vlan_enable_all_test), + ztest_unit_test(vlan_disable_all_test), + ztest_unit_test(vlan_send_data_test) + ); + + ztest_run_test_suite(net_vlan_test); +} diff --git a/tests/net/vlan/testcase.yaml b/tests/net/vlan/testcase.yaml new file mode 100644 index 0000000000000..6cec09910a399 --- /dev/null +++ b/tests/net/vlan/testcase.yaml @@ -0,0 +1,4 @@ +tests: + test: + min_ram: 32 + tags: net vlan