
Commit 395f7d3

[nrf fromlist] settings: zms: code style clean up
Clean some parts of the code and refactor it to avoid multiple nested
conditions

Upstream PR #: 87792

Signed-off-by: Riadh Ghaddab <[email protected]>

(cherry picked from commit b053d9088902ff82400f95b25a200effbe32c234)
1 parent: 1c0fce6

2 files changed (+106, -83 lines)


subsys/settings/src/settings_zms.c (93 additions, 71 deletions)
```diff
@@ -252,9 +252,10 @@ static int settings_zms_load_subtree(struct settings_store *cs, const struct set
                 read_fn_arg.id = ZMS_NAME_ID_FROM_HASH(name_hash) + ZMS_DATA_ID_OFFSET;
 
                 ret = settings_call_set_handler(arg->subtree, rc2, settings_zms_read_fn,
-                                                &read_fn_arg, (void *)arg);
+                                                &read_fn_arg, arg);
                 /* We should return here as there are no need to look for the next
-                 * hash collision */
+                 * hash collision
+                 */
                 return ret;
         }
 
```

```diff
@@ -302,6 +303,37 @@ static ssize_t settings_zms_load_one(struct settings_store *cs, const char *name
         return rc;
 }
 
+/* Gets the next linked list node either from cache (if enabled) or from persistent
+ * storage if cache is full or cache is not enabled.
+ * It updates as well the next cache index and the next linked list node ID.
+ */
+static int settings_zms_get_next_ll(struct settings_zms *cf,
+                                    struct settings_hash_linked_list *settings_element,
+                                    uint32_t *ll_hash_id, [[maybe_unused]] uint32_t *ll_cache_index)
+{
+        int ret = 0;
+
+#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+        if (*ll_cache_index < cf->ll_cache_next) {
+                *settings_element = cf->ll_cache[*ll_cache_index];
+                *ll_cache_index = *ll_cache_index + 1;
+        } else {
+#endif
+                ret = zms_read(&cf->cf_zms, *ll_hash_id, settings_element,
+                               sizeof(struct settings_hash_linked_list));
+                if (ret < 0) {
+                        return ret;
+                }
+#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+        }
+#endif
+
+        /* update next ll_hash_id */
+        *ll_hash_id = settings_element->next_hash;
+
+        return 0;
+}
+
 static int settings_zms_load(struct settings_store *cs, const struct settings_load_arg *arg)
 {
         int ret = 0;
```

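The new helper centralizes the cache-or-storage lookup that previously appeared three times in `settings_zms_load()`, which is what lets the later hunks drop their nested `#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE` blocks. A minimal sketch of the traversal pattern the callers switch to (hypothetical, simplified caller; error paths and entry handling trimmed; only names from this diff are used):

```c
/* Sketch only: walking the settings hash linked list with the new helper.
 * Not the exact code from this commit. */
static int walk_settings_ll(struct settings_zms *cf)
{
        struct settings_hash_linked_list settings_element;
        uint32_t ll_hash_id = ZMS_LL_HEAD_HASH_ID; /* start at the list head */
        uint32_t ll_cache_index = 0;               /* first cache slot, if caching is on */
        int ret;

        /* first call reads the head node and advances ll_hash_id to its successor */
        ret = settings_zms_get_next_ll(cf, &settings_element, &ll_hash_id, &ll_cache_index);

        while (ret == 0 && ll_hash_id != 0) {
                /* ... load and apply the settings entry stored under ll_hash_id ... */
                ret = settings_zms_get_next_ll(cf, &settings_element, &ll_hash_id,
                                               &ll_cache_index);
        }

        return ret;
}
```
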
```diff
@@ -312,6 +344,8 @@ static int settings_zms_load(struct settings_store *cs, const struct settings_lo
         ssize_t rc1;
         ssize_t rc2;
         uint32_t ll_hash_id;
+        uint32_t ll_cache_index = 0;
+
 #ifdef CONFIG_SETTINGS_ZMS_LOAD_SUBTREE_PATH
         /* If arg->subtree is not null we must load settings in that subtree */
         if (arg->subtree != NULL) {
@@ -323,26 +357,22 @@ static int settings_zms_load(struct settings_store *cs, const struct settings_lo
 #endif /* CONFIG_SETTINGS_ZMS_LOAD_SUBTREE_PATH */
 
 #ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
-        uint32_t ll_cache_index = 0;
-
         if (cf->ll_has_changed) {
                 /* reload the linked list in cache */
                 ret = settings_zms_get_last_hash_ids(cf);
                 if (ret < 0) {
                         return ret;
                 }
         }
-        settings_element = cf->ll_cache[ll_cache_index++];
-#else
-        ret = zms_read(&cf->cf_zms, ZMS_LL_HEAD_HASH_ID, &settings_element,
-                       sizeof(struct settings_hash_linked_list));
+#endif
+
+        /* If subtree is NULL then we must load all found Settings */
+        ll_hash_id = ZMS_LL_HEAD_HASH_ID;
+        ret = settings_zms_get_next_ll(cf, &settings_element, &ll_hash_id, &ll_cache_index);
         if (ret < 0) {
                 return ret;
         }
-#endif /* CONFIG_SETTINGS_ZMS_LL_CACHE */
-        ll_hash_id = settings_element.next_hash;
 
-        /* If subtree is NULL then we must load all found Settings */
         while (ll_hash_id) {
 
                 /* In the ZMS backend, each setting item is stored in two ZMS
@@ -369,21 +399,13 @@ static int settings_zms_load(struct settings_store *cs, const struct settings_lo
                                 return ret;
                         }
 #endif /* CONFIG_SETTINGS_ZMS_NO_LL_DELETE */
-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
-                        if (ll_cache_index < cf->ll_cache_next) {
-                                settings_element = cf->ll_cache[ll_cache_index++];
-                        } else {
-#endif
-                                ret = zms_read(&cf->cf_zms, ll_hash_id, &settings_element,
-                                               sizeof(struct settings_hash_linked_list));
-                                if (ret < 0) {
-                                        return ret;
-                                }
-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+
+                        ret = settings_zms_get_next_ll(cf, &settings_element, &ll_hash_id,
+                                                       &ll_cache_index);
+                        if (ret < 0) {
+                                return ret;
                         }
-#endif
-                        /* update next ll_hash_id */
-                        ll_hash_id = settings_element.next_hash;
+
                         continue;
                 }
 
@@ -402,27 +424,15 @@ static int settings_zms_load(struct settings_store *cs, const struct settings_lo
                 settings_zms_cache_add(cf, ll_hash_id & ZMS_HASH_MASK, cache_flags);
 #endif
 
-                ret = settings_call_set_handler(name, rc2, settings_zms_read_fn, &read_fn_arg,
-                                                (void *)arg);
+                ret = settings_call_set_handler(name, rc2, settings_zms_read_fn, &read_fn_arg, arg);
                 if (ret) {
-                        break;
+                        return ret;
                 }
 
-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
-                if (ll_cache_index < cf->ll_cache_next) {
-                        settings_element = cf->ll_cache[ll_cache_index++];
-                } else {
-#endif
-                        ret = zms_read(&cf->cf_zms, ll_hash_id, &settings_element,
-                                       sizeof(struct settings_hash_linked_list));
-                        if (ret < 0) {
-                                return ret;
-                        }
-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+                ret = settings_zms_get_next_ll(cf, &settings_element, &ll_hash_id, &ll_cache_index);
+                if (ret < 0) {
+                        return ret;
                 }
-#endif
-                /* update next ll_hash_id */
-                ll_hash_id = settings_element.next_hash;
         }
 
         return ret;
```

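Note that replacing `break` with `return ret` in the handler-error path above does not change behavior: the loop is followed directly by `return ret;`, so bailing out of the loop with a non-zero `ret` returned the same value anyway. The direct return just makes the control flow explicit.
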
```diff
@@ -661,6 +671,45 @@ static ssize_t settings_zms_get_val_len(struct settings_store *cs, const char *n
         return 0;
 }
 
+/* This function inits the linked list head if it doesn't exist or recover it
+ * if the ll_last_hash_id is different than the head hash ID
+ */
+static int settings_zms_init_or_recover_ll(struct settings_zms *cf, uint32_t ll_last_hash_id)
+{
+        struct settings_hash_linked_list settings_element;
+        int rc = 0;
+
+        if (ll_last_hash_id == ZMS_LL_HEAD_HASH_ID) {
+                /* header doesn't exist */
+                settings_element.previous_hash = 0;
+                settings_element.next_hash = 0;
+                rc = zms_write(&cf->cf_zms, ZMS_LL_HEAD_HASH_ID, &settings_element,
+                               sizeof(struct settings_hash_linked_list));
+                if (rc < 0) {
+                        return rc;
+                }
+                cf->last_hash_id = ZMS_LL_HEAD_HASH_ID;
+                cf->second_to_last_hash_id = 0;
+#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
+                /* store the LL header in cache */
+                cf->ll_cache_next = 0;
+                cf->ll_cache[cf->ll_cache_next] = settings_element;
+                cf->ll_cache_next = cf->ll_cache_next + 1;
+#endif
+        } else {
+                /* let's recover it by keeping all nodes until the last one */
+                settings_element.previous_hash = cf->second_to_last_hash_id;
+                settings_element.next_hash = 0;
+                rc = zms_write(&cf->cf_zms, cf->last_hash_id, &settings_element,
+                               sizeof(struct settings_hash_linked_list));
+                if (rc < 0) {
+                        return rc;
+                }
+        }
+
+        return 0;
+}
+
 static int settings_zms_get_last_hash_ids(struct settings_zms *cf)
 {
         struct settings_hash_linked_list settings_element;
```

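For context, the node type being read and written here carries just the two link fields. The sketch below is inferred from the `.previous_hash` and `.next_hash` accesses in this diff; the authoritative definition lives in the settings ZMS backend source:

```c
/* Inferred from usage in this commit; see the settings ZMS backend
 * for the real definition. */
struct settings_hash_linked_list {
        uint32_t previous_hash; /* hash ID of the previous node in the list */
        uint32_t next_hash;     /* hash ID of the next node; 0 terminates the list */
};
```
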
```diff
@@ -680,35 +729,7 @@ static int settings_zms_get_last_hash_ids(struct settings_zms *cf)
                 /* header doesn't exist or linked list broken, reinitialize the header
                  * if it doesn't exist and recover it if it is broken
                  */
-                if (ll_last_hash_id == ZMS_LL_HEAD_HASH_ID) {
-                        /* header doesn't exist */
-                        const struct settings_hash_linked_list settings_element = {
-                                .previous_hash = 0, .next_hash = 0};
-                        rc = zms_write(&cf->cf_zms, ZMS_LL_HEAD_HASH_ID, &settings_element,
-                                       sizeof(struct settings_hash_linked_list));
-                        if (rc < 0) {
-                                return rc;
-                        }
-                        cf->last_hash_id = ZMS_LL_HEAD_HASH_ID;
-                        cf->second_to_last_hash_id = 0;
-#ifdef CONFIG_SETTINGS_ZMS_LL_CACHE
-                        /* store the LL header in cache */
-                        cf->ll_cache_next = 0;
-                        cf->ll_cache[cf->ll_cache_next] = settings_element;
-                        cf->ll_cache_next = cf->ll_cache_next + 1;
-#endif
-                } else {
-                        /* let's recover it by keeping all nodes until the last one */
-                        const struct settings_hash_linked_list settings_element = {
-                                .previous_hash = cf->second_to_last_hash_id,
-                                .next_hash = 0};
-                        rc = zms_write(&cf->cf_zms, cf->last_hash_id, &settings_element,
-                                       sizeof(struct settings_hash_linked_list));
-                        if (rc < 0) {
-                                return rc;
-                        }
-                }
-                return 0;
+                return settings_zms_init_or_recover_ll(cf, ll_last_hash_id);
         } else if (rc < 0) {
                 return rc;
         }
```

```diff
@@ -768,6 +789,7 @@ static int settings_zms_backend_init(struct settings_zms *cf)
         }
 
         cf->hash_collision_num = 0;
+        cf->ll_cache_next = 0;
 
         rc = settings_zms_get_last_hash_ids(cf);
 
```

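Initializing `cf->ll_cache_next` here presumably matters for the new helper: `settings_zms_get_next_ll()` only takes the cache path while `*ll_cache_index < cf->ll_cache_next`, so the counter must start at zero before `settings_zms_get_last_hash_ids()` repopulates the cache.
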
tests/subsys/settings/performance/settings_test_perf.c (13 additions, 12 deletions)
```diff
@@ -19,20 +19,21 @@ static struct k_work_q settings_work_q;
 static K_THREAD_STACK_DEFINE(settings_work_stack, 2024);
 static struct k_work_delayable pending_store;
 
-#define TEST_SETTINGS_COUNT (128)
-#define TEST_STORE_ITR (5)
-#define TEST_TIMEOUT_SEC (60)
+#define TEST_SETTINGS_COUNT      (128)
+#define TEST_STORE_ITR           (5)
+#define TEST_TIMEOUT_SEC         (60)
 #define TEST_SETTINGS_WORKQ_PRIO (1)
 
-static void bt_scan_cb(const bt_addr_le_t *addr, int8_t rssi, uint8_t adv_type,
-                       struct net_buf_simple *buf)
+static void bt_scan_cb([[maybe_unused]] const bt_addr_le_t *addr, [[maybe_unused]] int8_t rssi,
+                       [[maybe_unused]] uint8_t adv_type, struct net_buf_simple *buf)
 {
         printk("len %u\n", buf->len);
 }
 
 struct test_setting {
         uint32_t val;
-} test_settings[TEST_SETTINGS_COUNT];
+};
+struct test_setting test_settings[TEST_SETTINGS_COUNT];
 
 K_SEM_DEFINE(waitfor_work, 0, 1);
 
```

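The `[[maybe_unused]]` attribute (standardized in C23) silences unused-parameter warnings for callbacks whose signature is fixed by an API, without `(void)param` casts or the GNU-specific `__attribute__((unused))`. A standalone illustration in plain C (hypothetical `handler` function, not from this commit):

```c
#include <stdio.h>

/* A callback-style function whose fixed signature forces a parameter
 * we never read; [[maybe_unused]] documents and silences that. */
static void handler([[maybe_unused]] int reserved, int value)
{
        printf("value: %d\n", value); /* 'reserved' is intentionally ignored */
}

int main(void)
{
        handler(0, 42); /* prints "value: 42" */
        return 0;
}
```
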
```diff
@@ -45,14 +46,15 @@ static void store_pending(struct k_work *work)
                 uint32_t total_measured;
                 uint32_t single_entry_max;
                 uint32_t single_entry_min;
-        } stats = {0, 0, 0, UINT32_MAX};
+        };
+        struct test_stats stats = {0, 0, 0, UINT32_MAX};
 
         int64_t ts1 = k_uptime_get();
 
         /* benchmark storage performance */
         for (int j = 0; j < TEST_STORE_ITR; j++) {
                 for (int i = 0; i < TEST_SETTINGS_COUNT; i++) {
-                        test_settings[i].val = TEST_SETTINGS_COUNT*j + i;
+                        test_settings[i].val = TEST_SETTINGS_COUNT * j + i;
 
                         int64_t ts2 = k_uptime_get();
 
```

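Splitting the type definition from the variable declaration, as done for `test_settings` and `stats` above, keeps the type reusable and the initializer on its own line. A minimal illustration with hypothetical names:

```c
/* Fused form (the old style): type and variable share one declaration. */
struct point_fused { int x, y; } origin_fused = {0, 0};

/* Split form (the style this commit adopts): type first, variable after. */
struct point { int x, y; };
struct point origin = {0, 0};
```
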
```diff
@@ -80,8 +82,7 @@ static void store_pending(struct k_work *work)
         printk("*** storing of %u entries completed ***\n", ARRAY_SIZE(test_settings));
         printk("total calculated: %u, total measured: %u\n", stats.total_calculated,
                stats.total_measured);
-        printk("entry max: %u, entry min: %u\n", stats.single_entry_max,
-               stats.single_entry_min);
+        printk("entry max: %u, entry min: %u\n", stats.single_entry_max, stats.single_entry_min);
 
         k_sem_give(&waitfor_work);
 }
```

```diff
@@ -99,8 +100,8 @@ ZTEST(settings_perf, test_performance)
         }
 
         k_work_queue_start(&settings_work_q, settings_work_stack,
-                K_THREAD_STACK_SIZEOF(settings_work_stack),
-                K_PRIO_COOP(TEST_SETTINGS_WORKQ_PRIO), NULL);
+                           K_THREAD_STACK_SIZEOF(settings_work_stack),
+                           K_PRIO_COOP(TEST_SETTINGS_WORKQ_PRIO), NULL);
         k_thread_name_set(&settings_work_q.thread, "Settings workq");
         k_work_init_delayable(&pending_store, store_pending);
 
```