 # frozen_string_literal: true
 
-class SolidQueue::Job < SolidQueue::Record
-  include Executable
+module SolidQueue
+  class Job < Record
+    include Executable
 
-  if Gem::Version.new(Rails.version) >= Gem::Version.new("7.1")
-    serialize :arguments, coder: JSON
-  else
-    serialize :arguments, JSON
-  end
-
-  DEFAULT_PRIORITY = 0
-  DEFAULT_QUEUE_NAME = "default"
+    if Gem::Version.new(Rails.version) >= Gem::Version.new("7.1")
+      serialize :arguments, coder: JSON
+    else
+      serialize :arguments, JSON
+    end
 
-  class << self
-    def enqueue_all_active_jobs(active_jobs)
-      scheduled_jobs, immediate_jobs = active_jobs.partition(&:scheduled_at)
-      with_concurrency_limits, without_concurrency_limits = immediate_jobs.partition(&:concurrency_limited?)
+    class << self
+      def enqueue_all(active_jobs)
+        scheduled_jobs, immediate_jobs = active_jobs.partition(&:scheduled_at)
+        with_concurrency_limits, without_concurrency_limits = immediate_jobs.partition(&:concurrency_limited?)
 
-      with_concurrency_limits.each do |active_job|
-        enqueue_active_job(active_job)
+        schedule_all_at_once(scheduled_jobs)
+        enqueue_all_at_once(without_concurrency_limits)
+        enqueue_one_by_one(with_concurrency_limits)
       end
 
-      transaction do
-        job_rows = scheduled_jobs.map { |job| attributes_from_active_job(job) }
-        insert_all(job_rows)
-        inserted_jobs = where(active_job_id: scheduled_jobs.map(&:job_id))
-        SolidQueue::ScheduledExecution.create_all_from_jobs(inserted_jobs)
+      def schedule_all_at_once(active_jobs)
+        transaction do
+          inserted_jobs = create_all_from_active_jobs(active_jobs)
+          schedule_all(inserted_jobs)
+        end
       end
 
-      transaction do
-        job_rows = without_concurrency_limits.map { |job| attributes_from_active_job(job) }
-        insert_all(job_rows)
-        inserted_jobs = where(active_job_id: without_concurrency_limits.map(&:job_id))
-        SolidQueue::ReadyExecution.create_all_from_jobs(inserted_jobs)
+      def enqueue_all_at_once(active_jobs)
+        transaction do
+          inserted_jobs = create_all_from_active_jobs(active_jobs)
+          dispatch_all_at_once(inserted_jobs)
+        end
       end
-    end
 
-    def enqueue_active_job(active_job, scheduled_at: Time.current)
-      enqueue **attributes_from_active_job(active_job).reverse_merge(scheduled_at: scheduled_at)
-    end
+      def enqueue_one_by_one(active_jobs)
+        active_jobs.each { |active_job| enqueue(active_job) }
+      end
 
-    private
-      def enqueue(**kwargs)
-        create!(**kwargs).tap do
-          SolidQueue.logger.debug "[SolidQueue] Enqueued job #{kwargs}"
+      def enqueue(active_job, scheduled_at: Time.current)
+        create!(**attributes_from_active_job(active_job).reverse_merge(scheduled_at: scheduled_at)).tap do |job|
+          active_job.provider_job_id = job.id
         end
       end
 
-      def attributes_from_active_job(active_job)
-        {
-          queue_name: active_job.queue_name,
-          active_job_id: active_job.job_id,
-          priority: active_job.priority,
-          scheduled_at: active_job.scheduled_at,
-          class_name: active_job.class.name,
-          arguments: active_job.serialize,
-          concurrency_key: active_job.concurrency_key
-        }.compact.with_defaults(defaults)
-      end
+      private
+        DEFAULT_PRIORITY = 0
+        DEFAULT_QUEUE_NAME = "default"
 
-      def defaults
-        { queue_name: DEFAULT_QUEUE_NAME, priority: DEFAULT_PRIORITY }
-      end
+        def create_all_from_active_jobs(active_jobs)
+          job_rows = active_jobs.map { |job| attributes_from_active_job(job) }
+          insert_all(job_rows)
+          where(active_job_id: active_jobs.map(&:job_id))
+        end
+
+        def attributes_from_active_job(active_job)
+          {
+            queue_name: active_job.queue_name,
+            active_job_id: active_job.job_id,
+            priority: active_job.priority,
+            scheduled_at: active_job.scheduled_at,
+            class_name: active_job.class.name,
+            arguments: active_job.serialize,
+            concurrency_key: active_job.concurrency_key
+          }.compact.with_defaults(defaults)
+        end
+
+        def defaults
+          { queue_name: DEFAULT_QUEUE_NAME, priority: DEFAULT_PRIORITY }
+        end
+    end
   end
 end
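
For orientation only (not part of the commit): a minimal sketch of how the refactored bulk entry point might be exercised from a Rails app that already has SolidQueue installed as its Active Job backend. ExampleJob and the call site below are hypothetical; only SolidQueue::Job.enqueue_all comes from the code above.

# Hypothetical job class, defined only to illustrate the new bulk API.
class ExampleJob < ActiveJob::Base
  queue_as :default

  def perform(payload)
    Rails.logger.info("processed #{payload}")
  end
end

immediate = ExampleJob.new("now")
delayed   = ExampleJob.new("later")
delayed.scheduled_at = 10.minutes.from_now # normally set via ExampleJob.set(wait: ...)

# enqueue_all partitions the batch: `delayed` has a scheduled_at and is handed to
# schedule_all_at_once; `immediate` has no concurrency limit and goes through
# enqueue_all_at_once; concurrency-limited jobs would be enqueued one by one.
SolidQueue::Job.enqueue_all([immediate, delayed])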