Commit e694955 (parent: e13e1d1)

[nrf fromlist] settings: zms: code style clean up

Clean up some parts of the code and refactor it to avoid multiple nested
conditions.

Upstream PR #: 87792

Signed-off-by: Riadh Ghaddab <[email protected]>
(cherry picked from commit 67f8ebd1dbfd69034716756f7bd05bfc9c9f5f2d)

2 files changed: 106 additions, 83 deletions

subsys/settings/src/settings_zms.c (93 additions, 71 deletions)
@@ -253,9 +253,10 @@ static int settings_zms_load_subtree(struct settings_store *cs, const struct set
 	read_fn_arg.id = ZMS_NAME_ID_FROM_HASH(name_hash) + ZMS_DATA_ID_OFFSET;

 	ret = settings_call_set_handler(arg->subtree, rc2, settings_zms_read_fn,
-					&read_fn_arg, (void *)arg);
+					&read_fn_arg, arg);
 	/* We should return here as there are no need to look for the next
-	 * hash collision */
+	 * hash collision
+	 */
 	return ret;
 }

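Both edits in this first hunk are pure style. In C, any object pointer converts to a void * parameter implicitly, so the (void *) cast on arg was noise, and the closing */ of a multi-line comment moves onto its own line per Zephyr's comment style. A minimal sketch of the implicit-conversion point; the notify() callback below is hypothetical, not from this code:

#include <stdio.h>

/* a void * parameter accepts any object pointer without a cast */
static void notify(void *ctx)
{
	printf("ctx=%p\n", ctx);
}

int main(void)
{
	int arg = 5;

	notify(&arg); /* no (void *) cast required at the call site */
	return 0;
}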
@@ -303,6 +304,37 @@ static ssize_t settings_zms_load_one(struct settings_store *cs, const char *name
 	return rc;
 }

+/* Gets the next linked list node either from cache (if enabled) or from persistent
+ * storage if cache is full or cache is not enabled.
+ * It updates as well the next cache index and the next linked list node ID.
+ */
+static int settings_zms_get_next_ll(struct settings_zms *cf,
+				    struct settings_hash_linked_list *settings_element,
+				    uint32_t *ll_hash_id, [[maybe_unused]] uint32_t *ll_cache_index)
+{
+	int ret = 0;
+
+#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+	if (*ll_cache_index < cf->ll_cache_next) {
+		*settings_element = cf->ll_cache[*ll_cache_index];
+		*ll_cache_index = *ll_cache_index + 1;
+	} else {
+#endif
+		ret = zms_read(&cf->cf_zms, *ll_hash_id, settings_element,
+			       sizeof(struct settings_hash_linked_list));
+		if (ret < 0) {
+			return ret;
+		}
+#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+	}
+#endif
+
+	/* update next ll_hash_id */
+	*ll_hash_id = settings_element->next_hash;
+
+	return 0;
+}
+
 static int settings_zms_load(struct settings_store *cs, const struct settings_load_arg *arg)
 {
 	int ret = 0;
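This new helper is the heart of the cleanup: the same cache-or-storage step appeared three times in settings_zms_load(), each copy wrapped in its own #ifdef CONFIG_SETTINGS_ZMS_LL_CACHE pair. A standalone sketch of the pattern, using simplified stand-in types and a stubbed storage read rather than the real ZMS API:

#include <stdint.h>
#include <stdio.h>

#define CACHE_SIZE 4

struct ll_node {
	uint32_t previous_hash;
	uint32_t next_hash;
};

static struct ll_node cache[CACHE_SIZE];
static uint32_t cache_next; /* number of valid entries in the cache */

/* stand-in for zms_read(): fabricate a short chain 1 -> 2 -> 3 -> end */
static int storage_read(uint32_t id, struct ll_node *out)
{
	out->previous_hash = (id > 0) ? id - 1 : 0;
	out->next_hash = (id < 3) ? id + 1 : 0;
	return 0;
}

/* serve from the cache while it has entries, fall back to storage on a
 * miss, then advance the hash cursor to the node's successor */
static int get_next_ll(struct ll_node *node, uint32_t *hash_id, uint32_t *cache_index)
{
	if (*cache_index < cache_next) {
		*node = cache[*cache_index];
		*cache_index += 1;
	} else {
		int ret = storage_read(*hash_id, node);

		if (ret < 0) {
			return ret;
		}
	}
	*hash_id = node->next_hash;
	return 0;
}

int main(void)
{
	struct ll_node node;
	uint32_t hash_id = 0; /* list head, like ZMS_LL_HEAD_HASH_ID */
	uint32_t cache_index = 0;

	/* the head read and every subsequent step go through the same helper */
	while (get_next_ll(&node, &hash_id, &cache_index) == 0 && hash_id != 0) {
		printf("visiting node %u\n", (unsigned)hash_id);
	}
	return 0;
}

With cache_next left at zero, every call falls through to storage, which models a CONFIG_SETTINGS_ZMS_LL_CACHE=n build; preloading cache[] and cache_next would model the cache-hit path.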
@@ -313,6 +345,8 @@ static int settings_zms_load(struct settings_store *cs, const struct settings_lo
 	ssize_t rc1;
 	ssize_t rc2;
 	uint32_t ll_hash_id;
+	uint32_t ll_cache_index = 0;
+
 #ifdef CONFIG_SETTINGS_ZMS_LOAD_SUBTREE_PATH
 	/* If arg->subtree is not null we must load settings in that subtree */
 	if (arg->subtree != NULL) {
@@ -324,26 +358,22 @@ static int settings_zms_load(struct settings_store *cs, const struct settings_lo
 #endif /* CONFIG_SETTINGS_ZMS_LOAD_SUBTREE_PATH */

 #ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
-	uint32_t ll_cache_index = 0;
-
 	if (cf->ll_has_changed) {
 		/* reload the linked list in cache */
 		ret = settings_zms_get_last_hash_ids(cf);
 		if (ret < 0) {
 			return ret;
 		}
 	}
-	settings_element = cf->ll_cache[ll_cache_index++];
-#else
-	ret = zms_read(&cf->cf_zms, ZMS_LL_HEAD_HASH_ID, &settings_element,
-		       sizeof(struct settings_hash_linked_list));
+#endif
+
+	/* If subtree is NULL then we must load all found Settings */
+	ll_hash_id = ZMS_LL_HEAD_HASH_ID;
+	ret = settings_zms_get_next_ll(cf, &settings_element, &ll_hash_id, &ll_cache_index);
 	if (ret < 0) {
 		return ret;
 	}
-#endif /* CONFIG_SETTINGS_ZMS_LL_CACHE */
-	ll_hash_id = settings_element.next_hash;

-	/* If subtree is NULL then we must load all found Settings */
 	while (ll_hash_id) {

 		/* In the ZMS backend, each setting item is stored in two ZMS
@@ -370,21 +400,13 @@ static int settings_zms_load(struct settings_store *cs, const struct settings_lo
 				return ret;
 			}
 #endif /* CONFIG_SETTINGS_ZMS_NO_LL_DELETE */
-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
-			if (ll_cache_index < cf->ll_cache_next) {
-				settings_element = cf->ll_cache[ll_cache_index++];
-			} else {
-#endif
-				ret = zms_read(&cf->cf_zms, ll_hash_id, &settings_element,
-					       sizeof(struct settings_hash_linked_list));
-				if (ret < 0) {
-					return ret;
-				}
-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+
+			ret = settings_zms_get_next_ll(cf, &settings_element, &ll_hash_id,
+						       &ll_cache_index);
+			if (ret < 0) {
+				return ret;
 			}
-#endif
-			/* update next ll_hash_id */
-			ll_hash_id = settings_element.next_hash;
+
 			continue;
 		}

@@ -403,27 +425,15 @@ static int settings_zms_load(struct settings_store *cs, const struct settings_lo
 		settings_zms_cache_add(cf, ll_hash_id & ZMS_HASH_MASK, cache_flags);
 #endif

-		ret = settings_call_set_handler(name, rc2, settings_zms_read_fn, &read_fn_arg,
-						(void *)arg);
+		ret = settings_call_set_handler(name, rc2, settings_zms_read_fn, &read_fn_arg, arg);
 		if (ret) {
-			break;
+			return ret;
 		}

-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
-		if (ll_cache_index < cf->ll_cache_next) {
-			settings_element = cf->ll_cache[ll_cache_index++];
-		} else {
-#endif
-			ret = zms_read(&cf->cf_zms, ll_hash_id, &settings_element,
-				       sizeof(struct settings_hash_linked_list));
-			if (ret < 0) {
-				return ret;
-			}
-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+		ret = settings_zms_get_next_ll(cf, &settings_element, &ll_hash_id, &ll_cache_index);
+		if (ret < 0) {
+			return ret;
 		}
-#endif
-		/* update next ll_hash_id */
-		ll_hash_id = settings_element.next_hash;
 	}

 	return ret;
@@ -662,6 +672,45 @@ static ssize_t settings_zms_get_val_len(struct settings_store *cs, const char *n
 	return 0;
 }

+/* This function inits the linked list head if it doesn't exist or recover it
+ * if the ll_last_hash_id is different than the head hash ID
+ */
+static int settings_zms_init_or_recover_ll(struct settings_zms *cf, uint32_t ll_last_hash_id)
+{
+	struct settings_hash_linked_list settings_element;
+	int rc = 0;
+
+	if (ll_last_hash_id == ZMS_LL_HEAD_HASH_ID) {
+		/* header doesn't exist */
+		settings_element.previous_hash = 0;
+		settings_element.next_hash = 0;
+		rc = zms_write(&cf->cf_zms, ZMS_LL_HEAD_HASH_ID, &settings_element,
+			       sizeof(struct settings_hash_linked_list));
+		if (rc < 0) {
+			return rc;
+		}
+		cf->last_hash_id = ZMS_LL_HEAD_HASH_ID;
+		cf->second_to_last_hash_id = 0;
+#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+		/* store the LL header in cache */
+		cf->ll_cache_next = 0;
+		cf->ll_cache[cf->ll_cache_next] = settings_element;
+		cf->ll_cache_next = cf->ll_cache_next + 1;
+#endif
+	} else {
+		/* let's recover it by keeping all nodes until the last one */
+		settings_element.previous_hash = cf->second_to_last_hash_id;
+		settings_element.next_hash = 0;
+		rc = zms_write(&cf->cf_zms, cf->last_hash_id, &settings_element,
+			       sizeof(struct settings_hash_linked_list));
+		if (rc < 0) {
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
 static int settings_zms_get_last_hash_ids(struct settings_zms *cf)
 {
 	struct settings_hash_linked_list settings_element;
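Hoisting this logic into its own function keeps settings_zms_get_last_hash_ids() flat (see the next hunk) and removes the duplicated const struct initializers the old inline version carried. The two branches can be sketched in isolation; HEAD_ID, ll_node, and storage_write() below are hypothetical stand-ins, not the ZMS API:

#include <stdint.h>
#include <stdio.h>

#define HEAD_ID 0x80000000u

struct ll_node {
	uint32_t previous_hash;
	uint32_t next_hash;
};

/* stand-in for zms_write(): just report what would be persisted */
static int storage_write(uint32_t id, const struct ll_node *node)
{
	printf("write id=0x%x prev=0x%x next=0x%x\n", (unsigned)id,
	       (unsigned)node->previous_hash, (unsigned)node->next_hash);
	return 0;
}

static int init_or_recover_ll(uint32_t last_hash_id, uint32_t second_to_last)
{
	struct ll_node node = { .previous_hash = 0, .next_hash = 0 };

	if (last_hash_id == HEAD_ID) {
		/* the walk never left the head: no list yet, write an empty one */
		return storage_write(HEAD_ID, &node);
	}
	/* the walk stopped mid-list: terminate it at the last intact node */
	node.previous_hash = second_to_last;
	return storage_write(last_hash_id, &node);
}

int main(void)
{
	init_or_recover_ll(HEAD_ID, 0);       /* fresh initialization */
	init_or_recover_ll(0x1234u, 0x1111u); /* recovery of a broken tail */
	return 0;
}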
@@ -681,35 +730,7 @@ static int settings_zms_get_last_hash_ids(struct settings_zms *cf)
 		/* header doesn't exist or linked list broken, reinitialize the header
 		 * if it doesn't exist and recover it if it is broken
 		 */
-		if (ll_last_hash_id == ZMS_LL_HEAD_HASH_ID) {
-			/* header doesn't exist */
-			const struct settings_hash_linked_list settings_element = {
-				.previous_hash = 0, .next_hash = 0};
-			rc = zms_write(&cf->cf_zms, ZMS_LL_HEAD_HASH_ID, &settings_element,
-				       sizeof(struct settings_hash_linked_list));
-			if (rc < 0) {
-				return rc;
-			}
-			cf->last_hash_id = ZMS_LL_HEAD_HASH_ID;
-			cf->second_to_last_hash_id = 0;
-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
-			/* store the LL header in cache */
-			cf->ll_cache_next = 0;
-			cf->ll_cache[cf->ll_cache_next] = settings_element;
-			cf->ll_cache_next = cf->ll_cache_next + 1;
-#endif
-		} else {
-			/* let's recover it by keeping all nodes until the last one */
-			const struct settings_hash_linked_list settings_element = {
-				.previous_hash = cf->second_to_last_hash_id,
-				.next_hash = 0};
-			rc = zms_write(&cf->cf_zms, cf->last_hash_id, &settings_element,
-				       sizeof(struct settings_hash_linked_list));
-			if (rc < 0) {
-				return rc;
-			}
-		}
-		return 0;
+		return settings_zms_init_or_recover_ll(cf, ll_last_hash_id);
 	} else if (rc < 0) {
 		return rc;
 	}
@@ -769,6 +790,7 @@ static int settings_zms_backend_init(struct settings_zms *cf)
 	}

 	cf->hash_collision_num = 0;
+	cf->ll_cache_next = 0;

 	rc = settings_zms_get_last_hash_ids(cf);

tests/subsys/settings/performance/settings_test_perf.c

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -19,20 +19,21 @@ static struct k_work_q settings_work_q;
 static K_THREAD_STACK_DEFINE(settings_work_stack, 2024);
 static struct k_work_delayable pending_store;

-#define TEST_SETTINGS_COUNT (128)
-#define TEST_STORE_ITR (5)
-#define TEST_TIMEOUT_SEC (60)
+#define TEST_SETTINGS_COUNT      (128)
+#define TEST_STORE_ITR           (5)
+#define TEST_TIMEOUT_SEC         (60)
 #define TEST_SETTINGS_WORKQ_PRIO (1)

-static void bt_scan_cb(const bt_addr_le_t *addr, int8_t rssi, uint8_t adv_type,
-		       struct net_buf_simple *buf)
+static void bt_scan_cb([[maybe_unused]] const bt_addr_le_t *addr, [[maybe_unused]] int8_t rssi,
+		       [[maybe_unused]] uint8_t adv_type, struct net_buf_simple *buf)
 {
 	printk("len %u\n", buf->len);
 }

 struct test_setting {
 	uint32_t val;
-} test_settings[TEST_SETTINGS_COUNT];
+};
+struct test_setting test_settings[TEST_SETTINGS_COUNT];

 K_SEM_DEFINE(waitfor_work, 0, 1);

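Two style points in this hunk deserve a note: [[maybe_unused]] (standardized in C23, long available in C++17) marks callback parameters the body deliberately ignores, and declaring struct test_setting separately from the array keeps the type available for sizeof, function parameters, and further definitions. A small self-contained illustration, assuming a toolchain with C23 attribute support; on_event() is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* declaring the type on its own makes it reusable beyond this array */
struct test_setting {
	uint32_t val;
};
struct test_setting test_settings[4];

/* [[maybe_unused]] documents, and silences warnings about, parameters
 * the body deliberately ignores */
static void on_event([[maybe_unused]] int code, const char *msg)
{
	printf("%s\n", msg);
}

int main(void)
{
	test_settings[0].val = 42U;
	on_event(1, "hello");
	printf("%u (%zu bytes per entry)\n", (unsigned)test_settings[0].val,
	       sizeof(struct test_setting));
	return 0;
}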
@@ -45,14 +46,15 @@ static void store_pending(struct k_work *work)
 	uint32_t total_measured;
 	uint32_t single_entry_max;
 	uint32_t single_entry_min;
-	} stats = {0, 0, 0, UINT32_MAX};
+	};
+	struct test_stats stats = {0, 0, 0, UINT32_MAX};

 	int64_t ts1 = k_uptime_get();

 	/* benchmark storage performance */
 	for (int j = 0; j < TEST_STORE_ITR; j++) {
 		for (int i = 0; i < TEST_SETTINGS_COUNT; i++) {
-			test_settings[i].val = TEST_SETTINGS_COUNT*j + i;
+			test_settings[i].val = TEST_SETTINGS_COUNT * j + i;

 			int64_t ts2 = k_uptime_get();

@@ -80,8 +82,7 @@ static void store_pending(struct k_work *work)
 	printk("*** storing of %u entries completed ***\n", ARRAY_SIZE(test_settings));
 	printk("total calculated: %u, total measured: %u\n", stats.total_calculated,
 	       stats.total_measured);
-	printk("entry max: %u, entry min: %u\n", stats.single_entry_max,
-	       stats.single_entry_min);
+	printk("entry max: %u, entry min: %u\n", stats.single_entry_max, stats.single_entry_min);

 	k_sem_give(&waitfor_work);
 }
@@ -99,8 +100,8 @@ ZTEST(settings_perf, test_performance)
 	}

 	k_work_queue_start(&settings_work_q, settings_work_stack,
-			   K_THREAD_STACK_SIZEOF(settings_work_stack),
-			   K_PRIO_COOP(TEST_SETTINGS_WORKQ_PRIO), NULL);
+			   K_THREAD_STACK_SIZEOF(settings_work_stack),
+			   K_PRIO_COOP(TEST_SETTINGS_WORKQ_PRIO), NULL);
 	k_thread_name_set(&settings_work_q.thread, "Settings workq");
 	k_work_init_delayable(&pending_store, store_pending);
