Skip to content

Commit 67e3de7

Browse files
Merge pull request #17 from esl/otp27-docs
Convert docs to OTP27 style, with backwards compatibility
2 parents 4102b01 + ddb3f34 commit 67e3de7

File tree

5 files changed

+135
-89
lines changed

5 files changed

+135
-89
lines changed

rebar.config

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,7 @@
1-
{erl_opts, []}.
1+
{erl_opts,
2+
[warn_missing_doc, warn_missing_spec, warn_unused_import,
3+
warn_export_vars, verbose, report, debug_info
4+
]}.
25

36
{deps, [
47
{telemetry, "1.3.0"}

src/segmented_cache.erl

Lines changed: 121 additions & 80 deletions
Original file line numberDiff line numberDiff line change
@@ -1,146 +1,187 @@
1-
%%%-------------------------------------------------------------------
2-
%% @doc `segmented_cache' is a key/value pairs cache library implemented in rotating segments.
3-
%%
4-
%% For more information, see the README, and the function documentation.
5-
%% @end
6-
%%%-------------------------------------------------------------------
71
-module(segmented_cache).
82

3+
-if(?OTP_RELEASE >= 27).
4+
-define(MODULEDOC(Str), -moduledoc(Str)).
5+
-define(DOC(Str), -doc(Str)).
6+
-else.
7+
-define(MODULEDOC(Str), -compile([])).
8+
-define(DOC(Str), -compile([])).
9+
-endif.
10+
11+
?MODULEDOC("""
12+
`segmented_cache` is a key/value pairs cache library implemented in rotating segments.
13+
14+
For more information, see the README, and the function documentation.
15+
""").
16+
917
%% API
1018
-export([start/1, start/2]).
1119
-export([start_link/1, start_link/2]).
1220
-export([is_member/2, get_entry/2, put_entry/3, merge_entry/3, delete_entry/2, delete_pattern/2]).
1321

22+
?DOC("Telemetry metadata with cache hit information.").
23+
-type hit() :: #{name => name(), hit => boolean()}.
24+
25+
?DOC("Telemetry metadata with deletion error information.").
26+
-type delete_error(Key) :: #{name => atom(),
27+
value => Key,
28+
delete_type => entry | pattern,
29+
class => throw | error | exit,
30+
reason => term()}.
31+
32+
?DOC("`m:pg` scope for cache coordination across distribution.").
1433
-type scope() :: atom().
34+
?DOC("Cache unique name.").
1535
-type name() :: atom().
36+
?DOC("Strategy for cache eviction.").
1637
-type strategy() :: fifo | lru.
38+
?DOC("Dynamic type of _keys_ from cache clients.").
1739
-type key() :: term().
40+
?DOC("Dynamic type of _values_ from cache clients.").
1841
-type value() :: term().
42+
?DOC("Merging function to use for resolving conflicts").
1943
-type merger_fun(Value) :: fun((Value, Value) -> Value).
44+
?DOC("Configuration values for the cache.").
2045
-type opts() :: #{scope => scope(),
2146
strategy => strategy(),
2247
segment_num => non_neg_integer(),
2348
ttl => timeout() | {erlang:time_unit(), non_neg_integer()},
2449
merger_fun => merger_fun(term())}.
2550

26-
-export_type([scope/0, name/0, key/0, value/0, strategy/0, merger_fun/1, opts/0]).
51+
-export_type([scope/0, name/0, key/0, value/0, hit/0, delete_error/1,
52+
strategy/0, merger_fun/1, opts/0]).
2753

2854
%%====================================================================
2955
%% API
3056
%%====================================================================
3157

32-
%% @see start_link/2
58+
?DOC("See `start_link/2` for more details").
3359
-spec start(name()) -> gen_server:start_ret().
3460
start(Name) when is_atom(Name) ->
3561
start(Name, #{}).
3662

37-
%% @see start_link/2
63+
?DOC("See `start_link/2` for more details").
3864
-spec start(name(), opts()) -> gen_server:start_ret().
3965
start(Name, Opts) when is_atom(Name), is_map(Opts) ->
4066
segmented_cache_server:start(Name, Opts).
4167

42-
%% @see start_link/2
68+
?DOC("See `start_link/2` for more details").
4369
-spec start_link(name()) -> gen_server:start_ret().
4470
start_link(Name) when is_atom(Name) ->
4571
start_link(Name, #{}).
4672

47-
%% @doc Start and link a cache entity in the local node
48-
%%
49-
%% `Name' must be an atom. Then the cache will be identified by the pair `{segmented_cache, Name}',
50-
%% and an entry in persistent_term will be created and the worker will join a pg group of
51-
%% the same name.
52-
%% `Opts' is a map containing the configuration.
53-
%% `scope' is a `pg' scope. Defaults to `pg'.
54-
%% `strategy' can be fifo or lru. Default is `fifo'.
55-
%% `segment_num' is the number of segments for the cache. Default is `3'
56-
%% `ttl' is the live, in minutes, of _each_ segment. Default is `480', i.e., 8 hours.
57-
%% `merger_fun' is a function that, given a conflict, takes in order the old and new values and
58-
%% applies a merging strategy. See the `merger_fun(term())' type.
73+
?DOC("""
74+
Start and link a cache entity in the local node.
75+
76+
`Name` must be an atom. Then the cache will be identified by the pair `{segmented_cache, Name}`,
77+
and an entry in persistent_term will be created and the worker will join a pg group of
78+
the same name.
79+
`Opts` is a map containing the configuration.
80+
- `scope` is a `pg` scope. Defaults to `pg`.
81+
- `strategy` can be fifo or lru. Default is `fifo`.
82+
- `segment_num` is the number of segments for the cache. Default is `3`
83+
- `ttl` is the time to live, in minutes, of _each_ segment. Default is `480`, i.e., 8 hours.
84+
- `merger_fun` is a function that, given a conflict,
85+
takes in order the old and new values and applies a merging strategy.
86+
See the `t:merger_fun/1` type.
87+
""").
5988
-spec start_link(name(), opts()) -> gen_server:start_ret().
6089
start_link(Name, Opts) when is_atom(Name), is_map(Opts) ->
6190
segmented_cache_server:start_link(Name, Opts).
6291

63-
%% @doc Check if Key is cached
64-
%%
65-
%% Raises telemetry span
66-
%% name: [segmented_cache, Name, request, _]
67-
%% start metadata: #{name => atom()}
68-
%% stop metadata: #{name => atom(), hit => boolean()}
92+
?DOC("""
93+
Check if Key is cached.
94+
95+
Raises a telemetry span:
96+
- name: `[segmented_cache, Name, request, _]`
97+
- start metadata: `#{name => atom()}`
98+
- stop metadata: `t:hit/0`
99+
""").
69100
-spec is_member(name(), key()) -> boolean().
70101
is_member(Name, Key) when is_atom(Name) ->
71102
Span = segmented_cache_helpers:is_member_span(Name, Key),
72103
telemetry:span([segmented_cache, Name, request], #{name => Name, type => is_member}, Span).
73104

74-
%% @doc Get the entry for Key in cache
75-
%%
76-
%% Raises telemetry span
77-
%% name: [segmented_cache, Name, request, _]
78-
%% start metadata: #{name => atom()}
79-
%% stop metadata: #{name => atom(), hit => boolean()}
105+
?DOC("""
106+
Get the entry for Key in cache.
107+
108+
Raises telemetry span:
109+
- name: `[segmented_cache, Name, request, _]`
110+
- start metadata: `#{name => atom()}`
111+
- stop metadata: `t:hit/0`
112+
""").
80113
-spec get_entry(name(), key()) -> value() | not_found.
81114
get_entry(Name, Key) when is_atom(Name) ->
82115
Span = segmented_cache_helpers:get_entry_span(Name, Key),
83116
telemetry:span([segmented_cache, Name, request], #{name => Name, type => get_entry}, Span).
84117

85-
%% @doc Add an entry to the first table in the segments.
86-
%%
87-
%% Possible race conditions:
88-
%% <li> Two writers: another process might attempt to put a record at the same time. It this case,
89-
%% both writers will attempt `ets:insert_new', resulting in only one of them succeeding.
90-
%% The one that fails, will retry three times a `compare_and_swap', attempting to merge the
91-
%% values and ensuring no data is lost.</li>
92-
%% <li> One worker and the cleaner: there's a chance that by the time we insert in the ets table,
93-
%% this table is not the first anymore because the cleaner has taken action and pushed it
94-
%% behind.</li>
95-
%% <li> Two writers and the cleaner: a mix of the previous, it can happen that two writers can
96-
%% attempt to put a record at the same time, but exactly in-between, the cleaner rotates the
97-
%% tables, resulting in the first writter inserting in the table that immediately becomes the
98-
%% second, and the latter writter inserting in the recently treated as first, shadowing the
99-
%% previous.</li>
100-
%%
101-
%% To treat the data race with the cleaner, after a successful insert, we re-check the index,
102-
%% and if it has changed, we restart the whole operation again: we can be sure that no more
103-
%% rotations will be triggered in a while, so the second round will be final.
104-
%%
105-
%% Strategy considerations: under a fifo strategy, no other writes can happen, but under a lru
106-
%% strategy, many other workers might attemp to move a record forward. In this case, the
107-
%% forwarding movement doesn't modify the record, and therefore the `compare_and_swap'
108-
%% operation should succeed at once; then, once the record is in the front, all other workers
109-
%% shouldn't be attempting to move it.
118+
?DOC("""
119+
Add an entry to the first table in the segments.
120+
121+
### Possible race conditions:
122+
- Two writers: another process might attempt to put a record at the same time. In this case,
123+
both writers will attempt `ets:insert_new`, resulting in only one of them succeeding.
124+
The one that fails, will retry three times a `compare_and_swap`, attempting to merge the
125+
values and ensuring no data is lost.
126+
- One worker and the cleaner: there's a chance that by the time we insert in the ets table,
127+
this table is not the first anymore because the cleaner has taken action and pushed it
128+
behind.
129+
- Two writers and the cleaner: a mix of the previous, it can happen that two writers can
130+
attempt to put a record at the same time, but exactly in-between, the cleaner rotates the
131+
tables, resulting in the first writer inserting in the table that immediately becomes the
132+
second, and the latter writer inserting in the recently treated as first, shadowing the
133+
previous.
134+
135+
To treat the data race with the cleaner, after a successful insert,
136+
we re-check the index, and if it has changed, we restart the whole operation again:
137+
we can be sure that no more rotations will be triggered in a while,
138+
so the second round will be final.
139+
140+
### Strategy considerations:
141+
Under a fifo strategy, no other writes can happen, but under a lru strategy,
142+
many other workers might attempt to move a record forward. In this case,
143+
the forwarding movement doesn't modify the record, and therefore the `compare_and_swap`
144+
operation should succeed at once; then, once the record is in the front,
145+
all other workers shouldn't be attempting to move it.
146+
""").
110147
-spec put_entry(name(), key(), value()) -> boolean().
111148
put_entry(Name, Key, Value) when is_atom(Name) ->
112149
segmented_cache_helpers:put_entry_front(Name, Key, Value).
113150

114-
%% @doc Merge a new entry into an existing one, or add it at the front if none is found.
115-
%%
116-
%% Race conditions considerations:
117-
%% <li> Two writers: `compare_and_swap' will ensure they both succeed sequentially</li>
118-
%% <li> Any writers and the cleaner: under fifo, the writer modifies the record in place
119-
%% and doesn't need to be concerned with rotation. Under lru, the same considerations
120-
%% than for a `put_entry_front' apply.</li>
151+
?DOC("""
152+
Merge a new entry into an existing one, or add it at the front if none is found.
153+
154+
Race conditions considerations:
155+
- Two writers: `compare_and_swap` will ensure they both succeed sequentially
156+
- Any writers and the cleaner: under fifo, the writer modifies the record in place
157+
and doesn't need to be concerned with rotation. Under lru, the same considerations
158+
as for a `put_entry_front` apply.
159+
""").
121160
-spec merge_entry(name(), key(), value()) -> boolean().
122161
merge_entry(Name, Key, Value) when is_atom(Name) ->
123162
segmented_cache_helpers:merge_entry(Name, Key, Value).
124163

125-
%% @doc Delete an entry in all ets segments
126-
%%
127-
%% Might raise a telemetry error if the request fails:
128-
%% name: [segmented_cache, Name, delete_error]
129-
%% measurements: #{}
130-
%% metadata: #{name => atom(), delete_type => entry, value => Key,
131-
%% class => throw | error | exit, reason => term()}
164+
?DOC("""
165+
Delete an entry in all ets segments.
166+
167+
Might raise a telemetry error if the request fails:
168+
- name: `[segmented_cache, Name, delete_error]`
169+
- measurements: `#{}`
170+
- metadata: `t:delete_error/1`
171+
""").
132172
-spec delete_entry(name(), key()) -> true.
133173
delete_entry(Name, Key) when is_atom(Name) ->
134174
segmented_cache_server:request_delete_entry(Name, Key),
135175
segmented_cache_helpers:delete_entry(Name, Key).
136176

137-
%% @doc Delete a pattern in all ets segments
138-
%%
139-
%% Might raise a telemetry error if the request fails:
140-
%% name: [segmented_cache, Name, delete_error]
141-
%% measurements: #{}
142-
%% metadata: #{name => atom(), delete_type => pattern, value => Pattern,
143-
%% class => throw | error | exit, reason => term()}
177+
?DOC("""
178+
Delete a pattern in all ets segments.
179+
180+
Might raise a telemetry error if the request fails:
181+
- name: `[segmented_cache, Name, delete_error]`
182+
- measurements: `#{}`
183+
- metadata: `t:delete_error/1`
184+
""").
144185
-spec delete_pattern(name(), ets:match_pattern()) -> true.
145186
delete_pattern(Name, Pattern) when is_atom(Name) ->
146187
segmented_cache_server:request_delete_pattern(Name, Pattern),

src/segmented_cache_callbacks.erl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
%% @private
21
-module(segmented_cache_callbacks).
2+
-moduledoc false.
33

44
-export([is_member_ets_fun/2, get_entry_ets_fun/2,
55
delete_entry_fun/2, delete_pattern_fun/2,

src/segmented_cache_helpers.erl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
%% @private
21
-module(segmented_cache_helpers).
2+
-moduledoc false.
33

44
-define(APP_KEY, segmented_cache).
55

@@ -238,6 +238,7 @@ compare_and_swap(Attempts, EtsSegment, Key, Value, MergerFun) ->
238238
%% Note that we must first empty the last table, and then rotate the index. If it was done
239239
%% in the opposite order, there's a chance a worker can insert an entry at the front just
240240
%% before the table is purged.
241+
-spec purge_last_segment_and_rotate(segmented_cache:name()) -> non_neg_integer().
241242
purge_last_segment_and_rotate(Name) ->
242243
SegmentRecord = get_cache_config(Name),
243244
Index = atomics:get(SegmentRecord#segmented_cache.index, 1),

src/segmented_cache_server.erl

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
%% @private
21
-module(segmented_cache_server).
2+
-moduledoc false.
33

44
-behaviour(gen_server).
55

@@ -10,11 +10,11 @@
1010
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
1111

1212
-type request_content() :: term().
13-
1413
-record(cache_state, {scope :: segmented_cache:scope(),
1514
name :: segmented_cache:name(),
1615
ttl :: timeout(),
1716
timer_ref :: undefined | reference()}).
17+
-type state() :: #cache_state{}.
1818

1919
%%====================================================================
2020
%% API
@@ -40,7 +40,7 @@ request_delete_pattern(Name, Pattern) ->
4040
%% gen_server callbacks
4141
%%====================================================================
4242

43-
-spec init({segmented_cache:name(), segmented_cache:opts()}) -> {ok, #cache_state{}}.
43+
-spec init({segmented_cache:name(), segmented_cache:opts()}) -> {ok, state()}.
4444
init({Name, Opts}) ->
4545
#{scope := Scope, ttl := TTL} = segmented_cache_helpers:init_cache_config(Name, Opts),
4646
pg:join(Scope, Name, self()),
@@ -52,11 +52,11 @@ init({Name, Opts}) ->
5252
{ok, #cache_state{scope = Scope, name = Name, ttl = TTL, timer_ref = TimerRef}}
5353
end.
5454

55-
-spec handle_call(any(), gen_server:from(), #cache_state{}) -> {reply, ok, #cache_state{}}.
55+
-spec handle_call(any(), gen_server:from(), state()) -> {reply, ok, state()}.
5656
handle_call(_Msg, _From, State) ->
5757
{reply, ok, State}.
5858

59-
-spec handle_cast(term(), #cache_state{}) -> {noreply, #cache_state{}}.
59+
-spec handle_cast(term(), state()) -> {noreply, state()}.
6060
handle_cast({delete_entry, Key}, #cache_state{name = Name} = State) ->
6161
segmented_cache_helpers:delete_entry(Name, Key),
6262
{noreply, State};
@@ -66,7 +66,7 @@ handle_cast({delete_pattern, Pattern}, #cache_state{name = Name} = State) ->
6666
handle_cast(_Msg, State) ->
6767
{noreply, State}.
6868

69-
-spec handle_info(any(), #cache_state{}) -> {noreply, #cache_state{}}.
69+
-spec handle_info(any(), state()) -> {noreply, state()}.
7070
handle_info(purge, #cache_state{name = Name, ttl = TTL} = State) ->
7171
segmented_cache_helpers:purge_last_segment_and_rotate(Name),
7272
case TTL of
@@ -76,6 +76,7 @@ handle_info(purge, #cache_state{name = Name, ttl = TTL} = State) ->
7676
handle_info(_Msg, State) ->
7777
{noreply, State}.
7878

79+
-spec terminate(normal | shutdown | {shutdown, term()} | term(), state()) -> term().
7980
terminate(_Reason, #cache_state{name = Name, timer_ref = TimerRef}) ->
8081
segmented_cache_helpers:erase_cache_config(Name),
8182
case TimerRef of

0 commit comments

Comments
 (0)