Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion lib/sentry/application.ex
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ defmodule Sentry.Application do
] ++
maybe_http_client_spec ++
maybe_span_storage ++
[Sentry.Transport.SenderPool]
[Sentry.Transport.RateLimiter, Sentry.Transport.SenderPool]

cache_loaded_applications()

Expand Down
5 changes: 5 additions & 0 deletions lib/sentry/client_error.ex
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ defmodule Sentry.ClientError do
"""
@type reason() ::
:too_many_retries
| :rate_limited
| :server_error
| {:invalid_json, Exception.t()}
| {:request_failure, reason :: :inet.posix() | term()}
Expand Down Expand Up @@ -73,6 +74,10 @@ defmodule Sentry.ClientError do
"Sentry responded with status 429 - Too Many Requests and the SDK exhausted the configured retries"
end

# The event never reached Sentry: the transport dropped it up-front because
# its data category was still rate-limited by a previous response (429 or
# X-Sentry-Rate-Limits header).
defp format(:rate_limited) do
"the event was dropped because the category is currently rate-limited by Sentry"
end

defp format({:invalid_json, reason}) do
formatted =
if is_exception(reason) do
Expand Down
1 change: 1 addition & 0 deletions lib/sentry/client_report/sender.ex
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ defmodule Sentry.ClientReport.Sender do
| Sentry.CheckIn.t()
| ClientReport.t()
| Sentry.Event.t()
| Sentry.Transaction.t()
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This fix is unrelated right?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this was because of a dialyzer complaint, but now when I remove it I don't seem to be getting it anymore. But I noticed that the "item" type in that function doesn't match the type provided for "items" in Sentry.Envelope.t().

I thought about centralizing that into a Sentry.Envelope.item() type that could be reused, but decided to just do this for now to keep the update smaller. Since the dialyzer doesn't seem to care now, I'm happy to remove it if you'd prefer not have it in this PR.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes let's remove it and possibly go with the Sentry.Envelope.item() type in a separate PR 👍

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done

def record_discarded_events(reason, event_items, genserver)
when is_list(event_items) do
# We silently ignore events whose reasons aren't valid because we have to add it to the allowlist in Snuba
Expand Down
84 changes: 52 additions & 32 deletions lib/sentry/transport.ex
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ defmodule Sentry.Transport do
# This module is exclusively responsible for encoding and POSTing envelopes to Sentry.

alias Sentry.{ClientError, ClientReport, Config, Envelope, LoggerUtils}
alias Sentry.Transport.RateLimiter

@default_retries [1000, 2000, 4000, 8000]
@sentry_version 5
Expand Down Expand Up @@ -47,27 +48,13 @@ defmodule Sentry.Transport do
retries_left,
envelope_items
) do
case request(client, endpoint, headers, payload) do
case request(client, endpoint, headers, payload, envelope_items) do
{:ok, id} ->
{:ok, id}

# If Sentry gives us a Retry-After header, we listen to that instead of our
# own retry.
{:retry_after, delay_ms} when retries_left != [] ->
Process.sleep(delay_ms)

post_envelope_with_retries(
client,
endpoint,
headers,
payload,
tl(retries_left),
envelope_items
)

{:retry_after, _delay_ms} ->
{:error, :rate_limited} ->
ClientReport.Sender.record_discarded_events(:ratelimit_backoff, envelope_items)
{:error, ClientError.new(:too_many_retries)}
{:error, ClientError.new(:rate_limited)}

{:error, _reason} when retries_left != [] ->
[sleep_interval | retries_left] = retries_left
Expand All @@ -92,29 +79,32 @@ defmodule Sentry.Transport do
end
end

defp request(client, endpoint, headers, body) do
with {:ok, 200, _headers, body} <-
# Returns :ok when none of the envelope items belong to a currently
# rate-limited data category, otherwise {:error, :rate_limited} so the
# caller can drop the envelope without issuing an HTTP request.
defp check_rate_limited(envelope_items) do
  limited_item? = fn item ->
    item
    |> Envelope.get_data_category()
    |> RateLimiter.rate_limited?()
  end

  if Enum.any?(envelope_items, limited_item?) do
    {:error, :rate_limited}
  else
    :ok
  end
end

defp request(client, endpoint, headers, body, envelope_items) do
with :ok <- check_rate_limited(envelope_items),
{:ok, 200, _headers, body} <-
client_post_and_validate_return_value(client, endpoint, headers, body),
{:ok, json} <- Sentry.JSON.decode(body, Config.json_library()) do
{:ok, Map.get(json, "id")}
else
{:ok, 429, headers, _body} ->
delay_ms =
with timeout when is_binary(timeout) <-
:proplists.get_value("Retry-After", headers, nil),
{delay_s, ""} <- Integer.parse(timeout) do
delay_s * 1000
else
_ ->
# https://develop.sentry.dev/sdk/rate-limiting/#stage-1-parse-response-headers
60_000
end

{:retry_after, delay_ms}
{:ok, 429, _headers, _body} ->
{:error, :rate_limited}

{:ok, status, headers, body} ->
{:error, {:http, {status, headers, body}}}

{:error, :rate_limited} ->
{:error, :rate_limited}

{:error, reason} ->
{:error, {:request_failure, reason}}
end
Expand All @@ -127,6 +117,7 @@ defmodule Sentry.Transport do
{:ok, status, resp_headers, resp_body}
when is_integer(status) and status in 200..599 and is_list(resp_headers) and
is_binary(resp_body) ->
update_rate_limits(resp_headers, status)
{:ok, status, resp_headers, resp_body}

{:ok, status, resp_headers, resp_body} ->
Expand All @@ -137,6 +128,35 @@ defmodule Sentry.Transport do
end
end

# Records any rate limits advertised by Sentry in the response.
#
# The categorized X-Sentry-Rate-Limits header always takes precedence; a
# plain 429 without it falls back to a global limit derived from the
# Retry-After header. Any other response leaves the stored limits untouched.
defp update_rate_limits(headers, status) do
  case :proplists.get_value("X-Sentry-Rate-Limits", headers, nil) do
    rate_limits when is_binary(rate_limits) ->
      RateLimiter.update_rate_limits(rate_limits)

    _missing when status == 429 ->
      headers
      |> get_global_delay()
      |> RateLimiter.update_global_rate_limit()

    _missing ->
      :ok
  end
end

# Extracts the Retry-After delay (in whole seconds) from response headers.
#
# Per the spec, a missing or malformed Retry-After value defaults to 60s:
# https://develop.sentry.dev/sdk/rate-limiting/#stage-1-parse-response-headers
defp get_global_delay(headers) do
  retry_after = :proplists.get_value("Retry-After", headers, nil)

  if is_binary(retry_after) do
    case Integer.parse(retry_after) do
      {delay_seconds, ""} -> delay_seconds
      _malformed -> 60
    end
  else
    60
  end
end

defp get_endpoint_and_headers do
%Sentry.DSN{} = dsn = Config.dsn()

Expand Down
174 changes: 174 additions & 0 deletions lib/sentry/transport/rate_limiter.ex
Original file line number Diff line number Diff line change
@@ -0,0 +1,174 @@
defmodule Sentry.Transport.RateLimiter do
@moduledoc false
# Tracks rate limits per category from Sentry API responses.
# Uses an ETS table to store expiry timestamps for rate-limited categories.
# When Sentry returns a 429 response with rate limit headers, this module
# stores the expiry time per category, allowing other parts of the SDK to
# check if an event should be dropped before sending.
#
# See https://develop.sentry.dev/sdk/expected-features/rate-limiting/

use GenServer

@table __MODULE__
@sweep_interval_ms 60_000

## Public API

@doc """
Starts the rate limiter process.

## Options

  * `:name` - the name to register the process under. Defaults to
    `Sentry.Transport.RateLimiter`.

"""
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts \\ []) do
  name = Keyword.get(opts, :name, __MODULE__)
  GenServer.start_link(__MODULE__, nil, name: name)
end

## GenServer Callbacks

@impl true
def init(nil) do
_table = :ets.new(@table, [:named_table, :public, :set])
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Would we gain anything in perf by using a ordered set here? You'd have slower inserts but this table is a 99% read heavy table I think. Might be also good to consider read_concurrency.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I definitely should have enabled read_concurrency, and I've fixed that now. What's the thinking behind ordered set instead of just set though? Wouldn't ordered set have worse performance for lookups (log n instead of constant)?

Copy link
Contributor Author

@dnsbty dnsbty Nov 24, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since I was already doing the benchmarking on lookup vs select, I also went ahead and did a quick benchmark on set vs ordered_set:

Benchmark code
# Create a table using a set with no global limit
:ets.new(SetNoGlobal, [:named_table, :public, :set, read_concurrency: true])
:ets.insert(SetNoGlobal, {"error", 1764026065})

# Create a table using a set with global limit
:ets.new(SetWithGlobal, [:named_table, :public, :set, read_concurrency: true])
:ets.insert(SetWithGlobal, {"error", 1764026065})
:ets.insert(SetWithGlobal, {:global, 1764026065})

# Create a table using an ordered set with no global limit
:ets.new(OrderedSetNoGlobal, [:named_table, :public, :ordered_set, read_concurrency: true])
:ets.insert(OrderedSetNoGlobal, {"error", 1764026065})

# Create a table using an ordered set with global limit
:ets.new(OrderedSetWithGlobal, [:named_table, :public, :ordered_set, read_concurrency: true])
:ets.insert(OrderedSetWithGlobal, {"error", 1764026065})
:ets.insert(OrderedSetWithGlobal, {:global, 1764026065})

Benchee.run(
  %{
    "set (no results)" => fn ->
      :ets.lookup(SetNoGlobal, "transaction")
      :ets.lookup(SetNoGlobal, :global)
    end,
    "set (category limit only)" => fn ->
      :ets.lookup(SetNoGlobal, "error")
      # won't perform the global lookup since the category rate limit was found
    end,
    "set (global limit only)" => fn ->
      :ets.lookup(SetWithGlobal, "transaction")
      :ets.lookup(SetWithGlobal, :global)
    end,
    "set (global and category limits)" => fn ->
      :ets.lookup(SetWithGlobal, "error")
      # won't perform the global lookup since the category rate limit was found
    end,
    "ordered set (no results)" => fn ->
      :ets.lookup(OrderedSetNoGlobal, "transaction")
      :ets.lookup(OrderedSetNoGlobal, :global)
    end,
    "ordered set (category limit only)" => fn ->
      :ets.lookup(OrderedSetNoGlobal, "error")
      # won't perform the global lookup since the category rate limit was found
    end,
    "ordered set (global limit only)" => fn ->
      :ets.lookup(OrderedSetWithGlobal, "transaction")
      :ets.lookup(OrderedSetWithGlobal, :global)
    end,
    "ordered set (global and category limits)" => fn ->
      :ets.lookup(OrderedSetWithGlobal, "error")
      # won't perform the global lookup since the category rate limit was found
    end,
  }
)

And here are the results:

Full Benchee output
Operating System: macOS
CPU Information: Apple M1 Max
Number of Available Cores: 10
Available memory: 32 GB
Elixir 1.18.3
Erlang 27
JIT enabled: true

Benchmark suite executing with the following configuration:
warmup: 2 s
time: 5 s
memory time: 0 ns
reduction time: 0 ns
parallel: 1
inputs: none specified
Estimated total run time: 56 s
Excluding outliers: false

Benchmarking ordered set (category limit only) ...
Benchmarking ordered set (global and category limits) ...
Benchmarking ordered set (global limit only) ...
Benchmarking ordered set (no results) ...
Benchmarking set (category limit only) ...
Benchmarking set (global and category limits) ...
Benchmarking set (global limit only) ...
Benchmarking set (no results) ...
Calculating statistics...
Formatting results...

Name                                               ips        average  deviation         median         99th %
set (category limit only)                       2.25 M      445.06 ns  ±1116.49%         375 ns        1250 ns
ordered set (global and category limits)        2.24 M      445.96 ns  ±1107.44%         375 ns        1250 ns
set (global and category limits)                2.23 M      449.07 ns  ±1133.50%         375 ns        1250 ns
ordered set (category limit only)               2.08 M      480.31 ns  ±1036.76%         375 ns        1417 ns
ordered set (no results)                        1.52 M      656.08 ns   ±876.48%         583 ns        1500 ns
set (no results)                                1.52 M      659.69 ns   ±962.66%         583 ns        1500 ns
ordered set (global limit only)                 1.49 M      671.89 ns   ±987.77%         583 ns        1542 ns
set (global limit only)                         1.48 M      674.39 ns   ±878.87%         583 ns        1542 ns

Comparison: 
set (category limit only)                       2.25 M
ordered set (global and category limits)        2.24 M - 1.00x slower +0.90 ns
set (global and category limits)                2.23 M - 1.01x slower +4.01 ns
ordered set (category limit only)               2.08 M - 1.08x slower +35.25 ns
ordered set (no results)                        1.52 M - 1.47x slower +211.02 ns
set (no results)                                1.52 M - 1.48x slower +214.64 ns
ordered set (global limit only)                 1.49 M - 1.51x slower +226.83 ns
set (global limit only)                         1.48 M - 1.52x slower +229.33 ns
Comparison: 
set (category limit only)                       2.25 M
ordered set (global and category limits)        2.24 M - 1.00x slower +0.90 ns
set (global and category limits)                2.23 M - 1.01x slower +4.01 ns
ordered set (category limit only)               2.08 M - 1.08x slower +35.25 ns
ordered set (no results)                        1.52 M - 1.47x slower +211.02 ns
set (no results)                                1.52 M - 1.48x slower +214.64 ns
ordered set (global limit only)                 1.49 M - 1.51x slower +226.83 ns
set (global limit only)                         1.48 M - 1.52x slower +229.33 ns

Based on that, it looks like set is maybe a tiny bit faster. It doesn't seem to be a significant difference, but I'll just stick with that default.

schedule_sweep()
{:ok, :no_state}
end

@impl true
def handle_info(:sweep, state) do
  current_time = System.system_time(:second)

  # Drop every {key, expiry} row whose expiry is already in the past.
  # The key position is irrelevant to the match, so it is wildcarded.
  expired_spec = [{{:_, :"$1"}, [{:<, :"$1", current_time}], [true]}]
  :ets.select_delete(@table, expired_spec)

  schedule_sweep()
  {:noreply, state}
end

## Public Functions

@doc """
Checks if the given category is currently rate-limited.
Returns `true` if the category is rate-limited (either specifically or via
a global rate limit), `false` otherwise.
## Examples
iex> RateLimiter.rate_limited?("error")
false
iex> :ets.insert(RateLimiter, {"error", System.system_time(:second) + 60})
iex> RateLimiter.rate_limited?("error")
true
"""
@spec rate_limited?(String.t()) :: boolean()
def rate_limited?(category) do
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Categories here are a fixed set, right? Maybe we could be more specific on the strings we expect here?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

According to the Sentry docs, this is the list of possible categories:

  • default
  • error
  • transaction
  • monitor
  • span
  • log_item
  • security
  • attachment
  • session
  • profile
  • profile_chunk
  • replay
  • feedback
  • trace_metric
  • internal

It looks like right now the library only supports the following categories:

  • error
  • transaction
  • monitor
  • attachment
  • internal

We could enforce that on this function, but the downside I see to that is that it adds one more place where changes have to be made if new categories are added in the future. I think if we wanted to do that, I would probably recommend that we modify Sentry.Envelope.get_data_category/1 to return an atom instead of a string, and then we could create a Sentry.Envelope.data_category() type that would represent the set of possible category atoms like :error | :transaction | :monitor | :attachment | :internal just to make it more clear what is being supported.

That seems like it would probably be outside the scope of this PR, but I'm open to making that change if you think it would be best to do it now.

now = System.system_time(:second)
check_rate_limited(category, now) or check_rate_limited(:global, now)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I know this is really fast but given we're doing this for every event I wonder if perhaps a single ets:select pass to check both categories in one go would perform better. Maybe not but could be interesting to benchmark.

Copy link
Contributor Author

@dnsbty dnsbty Nov 24, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I just set this up with Benchee. Please let me know if you see any issues with the logic of the benchmark:

Benchmark code
Mix.install([{:benchee, "~> 1.0"}])

# Create a table that will have a rate limit for errors, but no global limit
:ets.new(NoGlobalLimit, [:named_table, :public, :set, read_concurrency: true])
:ets.insert(NoGlobalLimit, {"error", 1764026065})

# Create a second table with a global rate limit and category rate limits
:ets.new(GlobalLimit, [:named_table, :public, :set, read_concurrency: true])
:ets.insert(GlobalLimit, {:global, 1764028122})
:ets.insert(GlobalLimit, {"error", 1764026065})

Benchee.run(
  %{
    "select (no results)" => fn ->
      :ets.select(NoGlobalLimit, [
        {{:global, :"$1"}, [], [{{:global, :"$1"}}]},
        {{"error", :"$1"}, [], [{{"error", :"$1"}}]}
      ])
    end,
    "select (category limit only)" => fn ->
      :ets.select(NoGlobalLimit, [
        {{:global, :"$1"}, [], [{{:global, :"$1"}}]},
        {{"error", :"$1"}, [], [{{"error", :"$1"}}]}
      ])
    end,
    "select (global limit only)" => fn ->
      :ets.select(GlobalLimit, [
        {{:global, :"$1"}, [], [{{:global, :"$1"}}]},
        {{"transaction", :"$1"}, [], [{{"transaction", :"$1"}}]}
      ])
    end,
    "select (global and category limits)" => fn ->
      :ets.select(GlobalLimit, [
        {{:global, :"$1"}, [], [{{:global, :"$1"}}]},
        {{"error", :"$1"}, [], [{{"error", :"$1"}}]}
      ])
    end,
    "lookup (no results)" => fn ->
      :ets.lookup(NoGlobalLimit, "transaction")
      :ets.lookup(NoGlobalLimit, :global)
    end,
    "lookup (category limit only)" => fn ->
      :ets.lookup(NoGlobalLimit, "error")
      # won't perform the global lookup since the category rate limit was found
    end,
    "lookup (global limit only)" => fn ->
      :ets.lookup(GlobalLimit, "error")
      :ets.lookup(GlobalLimit, :global)
    end,
    "lookup (global and category limits)" => fn ->
      :ets.lookup(GlobalLimit, "error")
      # won't perform the global lookup since the category rate limit was found
    end,
  }
)

And these were the results:

Full Benchee output
Operating System: macOS
CPU Information: Apple M1 Max
Number of Available Cores: 10
Available memory: 32 GB
Elixir 1.18.3
Erlang 27
JIT enabled: true

Benchmark suite executing with the following configuration:
warmup: 2 s
time: 5 s
memory time: 0 ns
reduction time: 0 ns
parallel: 1
inputs: none specified
Estimated total run time: 56 s
Excluding outliers: false

Benchmarking lookup (category limit only) ...
Benchmarking lookup (global and category limits) ...
Benchmarking lookup (global limit only) ...
Benchmarking lookup (no results) ...
Benchmarking select (category limit only) ...
Benchmarking select (global and category limits) ...
Benchmarking select (global limit only) ...
Benchmarking select (no results) ...
Calculating statistics...
Formatting results...

Name                                          ips        average  deviation         median         99th %
lookup (category limit only)               1.67 M        0.60 μs   ±847.42%        0.38 μs       11.92 μs
lookup (global and category limits)        1.64 M        0.61 μs   ±935.57%        0.42 μs       11.96 μs
lookup (no results)                        1.50 M        0.67 μs   ±846.72%        0.58 μs        1.63 μs
lookup (global limit only)                 1.42 M        0.71 μs   ±985.98%        0.58 μs        1.67 μs
select (category limit only)               0.50 M        1.98 μs   ±317.25%        1.79 μs        3.58 μs
select (no results)                        0.50 M        1.99 μs   ±340.78%        1.83 μs        3.58 μs
select (global and category limits)        0.50 M        2.01 μs   ±297.20%        1.88 μs        3.63 μs
select (global limit only)                 0.49 M        2.03 μs   ±313.39%        1.83 μs        3.67 μs

Comparison: 
lookup (category limit only)               1.67 M
lookup (global and category limits)        1.64 M - 1.02x slower +0.0109 μs
lookup (no results)                        1.50 M - 1.11x slower +0.0684 μs
lookup (global limit only)                 1.42 M - 1.18x slower +0.109 μs
select (category limit only)               0.50 M - 3.32x slower +1.38 μs
select (no results)                        0.50 M - 3.33x slower +1.39 μs
select (global and category limits)        0.50 M - 3.37x slower +1.41 μs
select (global limit only)                 0.49 M - 3.39x slower +1.43 μs
Comparison: 
lookup (category limit only)               1.67 M
lookup (global and category limits)        1.64 M - 1.02x slower +0.0109 μs
lookup (no results)                        1.50 M - 1.11x slower +0.0684 μs
lookup (global limit only)                 1.42 M - 1.18x slower +0.109 μs
select (category limit only)               0.50 M - 3.32x slower +1.38 μs
select (no results)                        0.50 M - 3.33x slower +1.39 μs
select (global and category limits)        0.50 M - 3.37x slower +1.41 μs
select (global limit only)                 0.49 M - 3.39x slower +1.43 μs

Based on that, it looks like it's going to be more performant to stick with the two lookups

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I just now (sorry!) realized that the size of this ETS table is extremely contained—we'll never really go over one key per category, so even being very cautious we're not going over a few dozen rows in the ETS table. I benched with 20 rows in that table and, predictably, ets:select is every slower in comparison since it has to scan more rows. Well, better to be informed!

end

@doc """
Records a global rate limit derived from a `Retry-After` header value.

This is the fallback for when `X-Sentry-Rate-Limits` is not present. The
stored `:global` entry makes `rate_limited?/1` return `true` for every
category until the limit expires.

## Examples

    iex> RateLimiter.update_global_rate_limit(60)
    :ok

"""
@spec update_global_rate_limit(pos_integer()) :: :ok
def update_global_rate_limit(retry_after_seconds) when is_integer(retry_after_seconds) do
  expires_at = System.system_time(:second) + retry_after_seconds
  :ets.insert(@table, {:global, expires_at})
  :ok
end

@doc """
Stores the rate limits advertised in an `X-Sentry-Rate-Limits` header.

Each successfully-parsed quota is written to the ETS table as a
`{category, expiry}` row, with `:global` as the category for quotas that
apply to all categories. Passing `nil` (header absent) is a no-op.
Returns `:ok` regardless of parsing success.

## Examples

    iex> RateLimiter.update_rate_limits("60:error;transaction")
    :ok

"""
@spec update_rate_limits(String.t() | nil) :: :ok
def update_rate_limits(nil), do: :ok

def update_rate_limits(rate_limits_header) when is_binary(rate_limits_header) do
  parsed_at = System.system_time(:second)

  for {category, retry_after_seconds} <- parse_rate_limits_header(rate_limits_header) do
    :ets.insert(@table, {category, parsed_at + retry_after_seconds})
  end

  :ok
end

## Private Helpers

# True if the table holds a not-yet-expired entry for `category`.
# The table is a set, so the lookup returns at most one row.
@spec check_rate_limited(String.t() | :global, integer()) :: boolean()
defp check_rate_limited(category, time) do
  @table
  |> :ets.lookup(category)
  |> Enum.any?(fn {_key, expiry} -> expiry > time end)
end

# Parses an X-Sentry-Rate-Limits header such as
# "60:error;transaction:key, 2700:default:organization" into a list of
# {category, retry_after_seconds} pairs, where category may be :global.
@spec parse_rate_limits_header(String.t()) :: [{String.t() | :global, integer()}]
defp parse_rate_limits_header(header_value) do
  for quota <- String.split(header_value, ","),
      pair <- parse_quota_limit(String.trim(quota)) do
    pair
  end
end

# Parses a single quota entry ("retry_after:categories:scope:...") into
# {category, retry_after} pairs. Entries with a malformed retry-after
# component are silently dropped (yield []).
@spec parse_quota_limit(String.t()) :: [{String.t() | :global, integer()}]
defp parse_quota_limit(entry) do
  # String.split/2 never returns an empty list, so the match is safe.
  [retry_after_part | category_parts] = String.split(entry, ":")

  case parse_retry_after(retry_after_part) do
    {:ok, seconds} -> parse_categories(category_parts, seconds)
    :error -> []
  end
end

# Parses the retry-after component of a quota entry. Only a bare base-10
# integer (no trailing characters) is accepted; anything else is :error.
@spec parse_retry_after(String.t() | nil) :: {:ok, integer()} | :error
defp parse_retry_after(nil), do: :error

defp parse_retry_after(value) do
  with {seconds, ""} <- Integer.parse(value) do
    {:ok, seconds}
  else
    _other -> :error
  end
end

# Expands the semicolon-separated categories component into
# {category, retry_after} pairs. An empty component (e.g. "60::organization")
# means the quota applies to every category, so it maps to :global.
# A quota with no categories component at all yields nothing.
@spec parse_categories([String.t()], integer()) :: [{String.t() | :global, integer()}]
defp parse_categories([categories_str | _ignored], retry_after) do
  case String.split(categories_str, ";") do
    [""] -> [{:global, retry_after}]
    categories -> for category <- categories, do: {category, retry_after}
  end
end

defp parse_categories(_other, _retry_after), do: []

# Arms the timer for the next expired-entry sweep; the resulting :sweep
# message is handled in handle_info/2, which re-arms the timer, so sweeps
# repeat every @sweep_interval_ms for the life of the process.
@spec schedule_sweep() :: reference()
defp schedule_sweep do
Process.send_after(self(), :sweep, @sweep_interval_ms)
end
end
Loading
Loading