 defmodule Flume.Queue.Manager do
   require Flume.Logger
 
-  alias Flume.{Config, Event, Logger, Instrumentation, Utils}
+  alias Flume.{Config, Event, Logger, Instrumentation, Utils, Redis}
   alias Flume.Redis.Job
   alias Flume.Queue.Backoff
   alias Flume.Support.Time, as: TimeExtension
@@ -66,6 +66,20 @@ defmodule Flume.Queue.Manager do |
     schedule_job_at(queue_name, time_in_seconds, job)
   end
 
+  def job_counts(namespace, [_queue | _] = queues) do
+    queues
+    |> Enum.map(&(fully_qualified_queue_name(namespace, &1) |> Redis.Command.llen()))
+    |> Redis.Client.pipeline()
+    |> case do
+      {:ok, counts} ->
+        {:ok, Enum.zip(queues, counts)}
+
+      {:error, reason} ->
+        Logger.error("Error in getting job counts #{inspect(reason)}")
+        {:error, reason}
+    end
+  end
+
   def fetch_jobs(
         namespace,
         queue,
@@ -263,6 +277,8 @@ defmodule Flume.Queue.Manager do |
     [scheduled_key(namespace), retry_key(namespace)]
   end
 
+  defp fully_qualified_queue_name(namespace, queue_name), do: "#{namespace}:queue:#{queue_name}"
+
   def processing_key(namespace, queue), do: full_key(namespace, "queue:processing:#{queue}")
 
   def rate_limit_key(namespace, queue, nil), do: full_key(namespace, "queue:limit:#{queue}")
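
A minimal usage sketch of the new job_counts/2, assuming the manager's usual namespace/queue-name arguments (the "flume" namespace and the queue names below are illustrative, not taken from this change). Each queue is expanded to its "<namespace>:queue:<queue>" key, one LLEN command is pipelined per key, and each queue is paired with its resulting length:

  # Hypothetical call; namespace and queue names are made up for illustration.
  # All LLEN commands go out in a single pipelined round trip to Redis.
  {:ok, counts} = Flume.Queue.Manager.job_counts("flume", ["default", "mailer"])
  # counts is a list of {queue, length} tuples, e.g. [{"default", 42}, {"mailer", 0}]

On a pipeline failure the function logs the reason and returns {:error, reason} instead.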