/*
 * Copyright (c) 2018 Intel Corporation
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
|
21 | 22 | */
|
22 | 23 |
|
23 | 24 | #include <string.h>
|
24 |
| -#include <zephyr/types.h> |
25 | 25 | #include <stdbool.h>
|
| 26 | +#include <limits.h> |
| 27 | +#include <zephyr/types.h> |
| 28 | +#include <sys/slist.h> |
26 | 29 |
|
27 | 30 | #ifdef __cplusplus
|
28 | 31 | extern "C" {
|
29 | 32 | #endif
|
30 | 33 |
|
/** @brief Upper bound, in milliseconds, of a single evaluated timeout period.
 *
 * Timeouts are processed from work queues that do not run synchronously
 * with clock changes, so expiration must be detectable after the fact by
 * comparing "deadline" against "now" with enough slop to absorb any
 * latency from "now" advancing past "deadline".
 *
 * The simplest solution is the well-defined conversion of the 32-bit
 * unsigned difference to a 32-bit signed difference, which caps a single
 * period at INT32_MAX milliseconds.  This matches the standard idiom for
 * detecting completion of deadlines that do not overflow their
 * representation; longer lifetimes are handled by counting whole periods
 * of this length (see struct net_timeout).
 */
#define NET_TIMEOUT_MAX_VALUE ((uint32_t)INT32_MAX)
35 | 49 |
|
/** Generic struct for handling network timeouts.
 *
 * Except for the linking node, all access to state from these objects must
 * go through the defined API.
 */
struct net_timeout {
	/** Used to link multiple timeouts that share a common timer
	 * infrastructure.
	 *
	 * For example a set of related timers may use a single delayed work
	 * structure, which is always scheduled at the shortest time to a
	 * timeout event.
	 */
	sys_snode_t node;

	/* Time at which the timer was last set.
	 *
	 * This usually corresponds to the low 32 bits of k_uptime_get().
	 */
	uint32_t timer_start;

	/* Portion of the remaining timeout, in milliseconds, that does not
	 * exceed NET_TIMEOUT_MAX_VALUE.
	 *
	 * This value is updated in parallel with timer_start and
	 * wrap_counter by net_timeout_evaluate().
	 */
	uint32_t timer_timeout;

	/* Timer wrap count.
	 *
	 * This tracks multiples of NET_TIMEOUT_MAX_VALUE milliseconds that
	 * have yet to pass.  It is updated in parallel with timer_start and
	 * timer_timeout by net_timeout_evaluate().
	 */
	uint32_t wrap_counter;
};
|
57 | 85 |
|
/** @brief Configure a network timeout structure.
 *
 * @param timeout a pointer to the timeout state.
 *
 * @param lifetime the duration of the timeout in seconds.
 *
 * @param now the time at which the timeout started counting down, in
 * milliseconds.  This is generally a captured value of k_uptime_get_32().
 */
void net_timeout_set(struct net_timeout *timeout,
		     uint32_t lifetime,
		     uint32_t now);

/** @brief Return the 64-bit system time at which the timeout will complete.
 *
 * @note Correct behavior requires invocation of net_timeout_evaluate() at
 * its specified intervals.
 *
 * @param timeout a pointer to the timeout state, initialized by
 * net_timeout_set() and maintained by net_timeout_evaluate().
 *
 * @param now the full-precision value of k_uptime_get() relative to which
 * the deadline will be calculated.
 *
 * @return the value of k_uptime_get() at which the timeout will expire.
 */
int64_t net_timeout_deadline(const struct net_timeout *timeout,
			     int64_t now);

/** @brief Calculate the remaining time to the timeout in whole seconds.
 *
 * @note This function rounds the remaining time down, i.e. if the timeout
 * will occur in 3500 milliseconds the value 3 will be returned.
 *
 * @note Correct behavior requires invocation of net_timeout_evaluate() at
 * its specified intervals.
 *
 * @param timeout a pointer to the timeout state.
 *
 * @param now the time relative to which the estimate of remaining time
 * should be calculated.  This should be a recently captured value of
 * k_uptime_get_32().
 *
 * @retval 0 if the timeout has completed.
 * @retval positive the remaining duration of the timeout, in seconds.
 */
uint32_t net_timeout_remaining(const struct net_timeout *timeout,
			       uint32_t now);

/** @brief Update state to reflect elapsed time and get new delay.
 *
 * This function must be invoked periodically to (1) apply the effect of
 * elapsed time on what remains of a total delay that exceeded the maximum
 * representable delay, and (2) determine that either the timeout has
 * completed or that the infrastructure must wait a certain period before
 * checking again for completion.
 *
 * @param timeout a pointer to the timeout state.
 *
 * @param now the time relative to which the estimate of remaining time
 * should be calculated.  This should be a recently captured value of
 * k_uptime_get_32().
 *
 * @retval 0 if the timeout has completed.
 * @retval positive the maximum delay until the state of this timeout should
 * be re-evaluated, in milliseconds.
 */
uint32_t net_timeout_evaluate(struct net_timeout *timeout,
			      uint32_t now);
58 | 156 | #ifdef __cplusplus
|
59 | 157 | }
|
60 | 158 | #endif
|
|
0 commit comments