Skip to content

Commit 17d9503

Browse files
rghaddab and rlubos
authored and committed
Revert "[nrf noup] settings: zms: use dedicated lookup cache hash function"
This reverts commit 98662fc. Signed-off-by: Riadh Ghaddab <[email protected]>
1 parent 8411e6a commit 17d9503

File tree

2 files changed

+0
-54
lines changed

2 files changed

+0
-54
lines changed

subsys/fs/zms/Kconfig

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -51,15 +51,6 @@ config ZMS_MAX_BLOCK_SIZE
5151
help
5252
Changes the internal buffer size of ZMS
5353

54-
config ZMS_LOOKUP_CACHE_FOR_SETTINGS
55-
bool "ZMS Storage lookup cache optimized for settings"
56-
depends on ZMS_LOOKUP_CACHE
57-
help
58-
Use the lookup cache hash function that results in the least number of
59-
collisions and, in turn, the best ZMS performance provided that the ZMS
60-
is used as the settings backend only. This option should NOT be enabled
61-
if the ZMS is also written to directly, outside the settings layer.
62-
6354
module = ZMS
6455
module-str = zms
6556
source "subsys/logging/Kconfig.template.log_config"

subsys/fs/zms/zms.c

Lines changed: 0 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -11,10 +11,6 @@
1111
#include <zephyr/fs/zms.h>
1212
#include <zephyr/sys/crc.h>
1313
#include "zms_priv.h"
14-
#ifdef CONFIG_ZMS_LOOKUP_CACHE_FOR_SETTINGS
15-
#include <zephyr/sys/util.h>
16-
#include <settings/settings_zms.h>
17-
#endif
1814

1915
#include <zephyr/logging/log.h>
2016
LOG_MODULE_REGISTER(fs_zms, CONFIG_ZMS_LOG_LEVEL);
@@ -29,45 +25,6 @@ static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_at
2925

3026
#ifdef CONFIG_ZMS_LOOKUP_CACHE
3127

32-
#ifdef CONFIG_ZMS_LOOKUP_CACHE_FOR_SETTINGS
33-
34-
static inline size_t zms_lookup_cache_pos(uint32_t id)
35-
{
36-
/*
37-
* 1. The ZMS settings backend uses up to (ZMS_NAME_ID_OFFSET - 1) ZMS IDs to
38-
store keys and equal number of ZMS IDs to store values.
39-
* 2. For each key-value pair, the value is stored at ZMS ID greater by exactly
40-
* ZMS_NAME_ID_OFFSET than ZMS ID that holds the key.
41-
* 3. The backend tries to minimize the range of ZMS IDs used to store keys.
42-
* That is, ZMS IDs are allocated sequentially, and freed ZMS IDs are reused
43-
* before allocating new ones.
44-
*
45-
* Therefore, to assure the least number of collisions in the lookup cache,
46-
* the least significant bit of the hash indicates whether the given ZMS ID
47-
* represents a key or a value, and remaining bits of the hash are set to
48-
* the ordinal number of the key-value pair. Consequently, the hash function
49-
* provides the following mapping:
50-
*
51-
* 1st settings key => hash 0
52-
* 1st settings value => hash 1
53-
* 2nd settings key => hash 2
54-
* 2nd settings value => hash 3
55-
* ...
56-
*/
57-
BUILD_ASSERT(IS_POWER_OF_TWO(ZMS_NAMECNT_ID), "ZMS_NAMECNT_ID is not power of 2");
58-
BUILD_ASSERT(IS_POWER_OF_TWO(ZMS_NAME_ID_OFFSET), "ZMS_NAME_ID_OFFSET is not power of 2");
59-
60-
uint32_t key_value_bit;
61-
uint32_t key_value_ord;
62-
63-
key_value_bit = (id >> LOG2(ZMS_NAME_ID_OFFSET)) & 1;
64-
key_value_ord = id & (ZMS_NAME_ID_OFFSET - 1);
65-
66-
return ((key_value_ord << 1) | key_value_bit) % CONFIG_ZMS_LOOKUP_CACHE_SIZE;
67-
}
68-
69-
#else /* CONFIG_ZMS_LOOKUP_CACHE_FOR_SETTINGS */
70-
7128
static inline size_t zms_lookup_cache_pos(uint32_t id)
7229
{
7330
uint32_t hash;
@@ -83,8 +40,6 @@ static inline size_t zms_lookup_cache_pos(uint32_t id)
8340
return hash % CONFIG_ZMS_LOOKUP_CACHE_SIZE;
8441
}
8542

86-
#endif /* CONFIG_ZMS_LOOKUP_CACHE_FOR_SETTINGS */
87-
8843
static int zms_lookup_cache_rebuild(struct zms_fs *fs)
8944
{
9045
int rc, previous_sector_num = ZMS_INVALID_SECTOR_NUM;

0 commit comments

Comments
 (0)