From 54aea7507b589fecfbc3195dc9d2faeda87a83ec Mon Sep 17 00:00:00 2001 From: Tim Standen Date: Tue, 3 Jun 2025 12:58:39 -0700 Subject: [PATCH 01/43] [F] Replace sidekiq and zhong with good_job --- api/Gemfile | 5 +- api/Gemfile.lock | 27 +- api/Procfile | 2 +- .../annotation_jobs/adopt_or_orphan_job.rb | 2 +- api/app/jobs/application_job.rb | 1 + .../exportation/export_text_to_epub_v3_job.rb | 2 +- api/app/services/concerns/job_concurrency.rb | 35 +++ api/bin/{zhong => good_job} | 10 +- api/bin/sidekiq | 17 -- api/bin/sidekiq_dev | 6 - api/config/application.rb | 2 +- api/config/initializers/good_job.rb | 57 ++++ api/config/initializers/sidekiq_and_redis.rb | 9 - api/config/initializers/zhong.rb | 3 - api/config/routes.rb | 6 +- .../20250603170620_create_good_jobs.rb | 102 +++++++ api/db/structure.sql | 263 +++++++++++++++++- api/zhong.rb | 66 ----- docker-compose.yml | 40 +-- 19 files changed, 492 insertions(+), 163 deletions(-) create mode 100644 api/app/services/concerns/job_concurrency.rb rename api/bin/{zhong => good_job} (60%) mode change 100755 => 100644 delete mode 100755 api/bin/sidekiq delete mode 100755 api/bin/sidekiq_dev create mode 100644 api/config/initializers/good_job.rb delete mode 100644 api/config/initializers/sidekiq_and_redis.rb delete mode 100644 api/config/initializers/zhong.rb create mode 100644 api/db/migrate/20250603170620_create_good_jobs.rb delete mode 100644 api/zhong.rb diff --git a/api/Gemfile b/api/Gemfile index 14c638537c..84e96606c1 100644 --- a/api/Gemfile +++ b/api/Gemfile @@ -29,6 +29,7 @@ gem "cleanroom" gem "closure_tree", "~> 7.0" # Until Rails 7.1 gem "composite_primary_keys", "~> 14.0.10" +gem "connection_pool" gem "crass", "~> 1.0.5" gem "csl-styles", "~> 1.0" gem "cssbeautify" @@ -114,7 +115,6 @@ gem "scenic", "~> 1.4" gem "shrine", "~> 3.5.0" gem "shrine-google_cloud_storage", "~> 3.3" gem "shrine-tus", "~> 2.0" -gem "sidekiq", "< 6" gem "signet", "~> 0.10" gem "sinatra", "~>2.2" gem "sixarm_ruby_unaccent", "~> 1.2.2" 
@@ -134,7 +134,6 @@ gem "validates_email_format_of", "~> 1.0" gem "validate_url", "~> 1.0" gem "with_advisory_lock", "~> 4.0" gem "zaru", "~> 1.0.0" -gem "zhong", "~> 0.3" group :development, :test do gem "pry-byebug" @@ -174,3 +173,5 @@ group :test do gem "test-prof", "~> 1.0" gem "with_model", "~> 2.1" end + +gem "good_job", "~> 3.99" diff --git a/api/Gemfile.lock b/api/Gemfile.lock index 0d99aeac87..b57e2cf890 100644 --- a/api/Gemfile.lock +++ b/api/Gemfile.lock @@ -295,6 +295,8 @@ GEM equalizer (0.0.11) errbase (0.2.2) erubi (1.12.0) + et-orbi (1.2.11) + tzinfo factory_bot (6.2.1) activesupport (>= 5.0.0) factory_bot_rails (6.2.0) @@ -333,6 +335,9 @@ GEM format_engine (0.7.10) friendly_id (5.5.0) activerecord (>= 4.0.0) + fugit (1.11.1) + et-orbi (~> 1, >= 1.2.11) + raabro (~> 1.4) fuzzy_match (2.1.0) gems (1.2.0) geocoder (1.8.2) @@ -341,6 +346,13 @@ GEM rubyzip (> 1.1.1, < 2.4) globalid (1.2.1) activesupport (>= 6.1) + good_job (3.99.1) + activejob (>= 6.0.0) + activerecord (>= 6.0.0) + concurrent-ruby (>= 1.0.2) + fugit (>= 1.1) + railties (>= 6.0.0) + thor (>= 0.14.1) google-api-client (0.53.0) google-apis-core (~> 0.1) google-apis-generator (~> 0.1) @@ -581,6 +593,7 @@ GEM public_suffix (5.0.3) puma (6.4.0) nio4r (~> 2.0) + raabro (1.4.0) racc (1.7.1) rack (2.2.13) rack-attack (6.7.0) @@ -743,11 +756,6 @@ GEM down (~> 5.0) http (>= 3.2, < 6) shrine (>= 3.0.0.rc, < 4) - sidekiq (5.2.10) - connection_pool (~> 2.2, >= 2.2.2) - rack (~> 2.0) - rack-protection (>= 1.5.0) - redis (~> 4.5, < 4.6.0) signet (0.18.0) addressable (~> 2.8) faraday (>= 0.17.5, < 3.a) @@ -848,11 +856,6 @@ GEM yard_types (0.2.0) zaru (1.0.0) zeitwerk (2.6.12) - zhong (0.3.0) - activesupport - redis - suo - tzinfo PLATFORMS ruby @@ -883,6 +886,7 @@ DEPENDENCIES cleanroom closure_tree (~> 7.0) composite_primary_keys (~> 14.0.10) + connection_pool crass (~> 1.0.5) csl-styles (~> 1.0) css_parser (~> 1.0) @@ -915,6 +919,7 @@ DEPENDENCIES fuzzy_match (~> 2.1.0) geocoder (= 1.8.2) gepub (~> 
1.0.4) + good_job (~> 3.99) google-api-client (~> 0.53.0) google_drive (~> 3.0) has_scope (~> 0.8.1) @@ -990,7 +995,6 @@ DEPENDENCIES shrine (~> 3.5.0) shrine-google_cloud_storage (~> 3.3) shrine-tus (~> 2.0) - sidekiq (< 6) signet (~> 0.10) simplecov sinatra (~> 2.2) @@ -1018,7 +1022,6 @@ DEPENDENCIES with_advisory_lock (~> 4.0) with_model (~> 2.1) zaru (~> 1.0.0) - zhong (~> 0.3) BUNDLED WITH 2.4.19 diff --git a/api/Procfile b/api/Procfile index fd66461212..183b15aedc 100644 --- a/api/Procfile +++ b/api/Procfile @@ -1,3 +1,3 @@ api: ./bin/puma -worker: ./bin/sidekiq_dev +worker: ./bin/good_job start --probe-port 7001 zhong: ./bin/zhong zhong.rb diff --git a/api/app/jobs/annotation_jobs/adopt_or_orphan_job.rb b/api/app/jobs/annotation_jobs/adopt_or_orphan_job.rb index ea5f55ff33..cd768f4f3c 100644 --- a/api/app/jobs/annotation_jobs/adopt_or_orphan_job.rb +++ b/api/app/jobs/annotation_jobs/adopt_or_orphan_job.rb @@ -5,7 +5,7 @@ module AnnotationJobs class AdoptOrOrphanJob < ApplicationJob queue_as :annotations - unique :until_executed, lock_ttl: 15.minutes, on_conflict: :log + unique_job! by: :job # @param [Annotation] annotation # @return [void] diff --git a/api/app/jobs/application_job.rb b/api/app/jobs/application_job.rb index 00159feeec..6af7ff4025 100644 --- a/api/app/jobs/application_job.rb +++ b/api/app/jobs/application_job.rb @@ -1,6 +1,7 @@ # frozen_string_literal: true class ApplicationJob < ActiveJob::Base + include JobConcurrency def match_result(result, &) Dry::Matcher::ResultMatcher.(result, &) end diff --git a/api/app/jobs/packaging/exportation/export_text_to_epub_v3_job.rb b/api/app/jobs/packaging/exportation/export_text_to_epub_v3_job.rb index fcfb72cf56..ef09aa8ccb 100644 --- a/api/app/jobs/packaging/exportation/export_text_to_epub_v3_job.rb +++ b/api/app/jobs/packaging/exportation/export_text_to_epub_v3_job.rb @@ -7,7 +7,7 @@ class ExportTextToEpubV3Job < ApplicationJob around_perform :advisory_locked! 
- unique :until_executed, lock_ttl: 15.minutes, on_conflict: :log + unique_job! by: :job queue_as :default diff --git a/api/app/services/concerns/job_concurrency.rb b/api/app/services/concerns/job_concurrency.rb new file mode 100644 index 0000000000..535cef1fda --- /dev/null +++ b/api/app/services/concerns/job_concurrency.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module JobConcurrency + extend ActiveSupport::Concern + + included do + include GoodJob::ActiveJobExtensions::Concurrency + end + + module ClassMethods + # @param [:model, :job, Proc] by + # @return [void] + def unique_job!(by: :job) + key = unique_job_key_for(by:) + + good_job_control_concurrency_with( + total_limit: 1, + key:, + ) + end + + # @param [:model, :job, Proc] by + # @return [Proc] + def unique_job_key_for(by: nil) + return by if by.kind_of?(Proc) + + case by + in :model + -> { "#{self.class.name}-#{arguments.first.to_global_id}" } + in :job + -> { self.class.name.to_s } + end + end + end +end diff --git a/api/bin/zhong b/api/bin/good_job old mode 100755 new mode 100644 similarity index 60% rename from api/bin/zhong rename to api/bin/good_job index 1160b6bc15..1ee66a5773 --- a/api/bin/zhong +++ b/api/bin/good_job @@ -4,18 +4,16 @@ # # This file was generated by Bundler. # -# The application 'zhong' is installed as part of a gem, and +# The application 'good_job' is installed as part of a gem, and # this file is here to facilitate running it. # -require "pathname" -ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../../Gemfile", - Pathname.new(__FILE__).realpath) +ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__) bundle_binstub = File.expand_path("bundle", __dir__) if File.file?(bundle_binstub) - if File.read(bundle_binstub, 300) =~ /This file was generated by Bundler/ + if File.read(bundle_binstub, 300).include?("This file was generated by Bundler") load(bundle_binstub) else abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. 
@@ -26,4 +24,4 @@ end require "rubygems" require "bundler/setup" -load Gem.bin_path("zhong", "zhong") +load Gem.bin_path("good_job", "good_job") diff --git a/api/bin/sidekiq b/api/bin/sidekiq deleted file mode 100755 index 83aab28f8c..0000000000 --- a/api/bin/sidekiq +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env ruby -# frozen_string_literal: true -# -# This file was generated by Bundler. -# -# The application 'sidekiq' is installed as part of a gem, and -# this file is here to facilitate running it. -# - -require "pathname" -ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../../Gemfile", - Pathname.new(__FILE__).realpath) - -require "rubygems" -require "bundler/setup" - -load Gem.bin_path("sidekiq", "sidekiq") diff --git a/api/bin/sidekiq_dev b/api/bin/sidekiq_dev deleted file mode 100755 index 660dc48b5b..0000000000 --- a/api/bin/sidekiq_dev +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ../.env -if [ "$RAILS_ENV" == "development" ]; then - exec ./bin/sidekiq -fi diff --git a/api/config/application.rb b/api/config/application.rb index fed5b185ee..552e2b06d2 100644 --- a/api/config/application.rb +++ b/api/config/application.rb @@ -104,7 +104,7 @@ class Application < Rails::Application g.orm :active_record, primary_key_type: :uuid end - config.active_job.queue_adapter = :sidekiq + config.active_job.queue_adapter = :good_job config.active_record.schema_format = :sql diff --git a/api/config/initializers/good_job.rb b/api/config/initializers/good_job.rb new file mode 100644 index 0000000000..0eede40a78 --- /dev/null +++ b/api/config/initializers/good_job.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +Rails.application.configure do + # Future-proofing + config.good_job.smaller_number_is_higher_priority = true + + queues = [ + "maintenance:1", + "default,mailers,processing,sorting,url_validity:3", + ].join(?;) + + config.good_job.cleanup_preserved_jobs_before_seconds_ago = 43_200 # half-day + config.good_job.preserve_job_records = true + 
config.good_job.retry_on_unhandled_error = false + config.good_job.on_thread_error = ->(exception) { Rollbar.error(exception) } + config.good_job.execution_mode = :external + config.good_job.queues = queues + config.good_job.max_threads = 5 + config.good_job.poll_interval = 30 # seconds + config.good_job.shutdown_timeout = 25 # seconds + config.good_job.enable_cron = true + config.good_job.cron = { + "feed.refresh_feed": { + cron: "0 * * * *", + class: "FeedRefreshJob", + description: "Refresh the opportunity feed.", + }, + "opportunities.archive_expired": { + cron: "55 * * * *", + class: "Opportunities::ArchiveExpiredJob", + description: "Automatically archive expired opportunities", + }, + "opportunities.calculate_all_sorting": { + cron: "0 1,13 * * *", + class: "Opportunities::CalculateAllSortingJob", + description: "Calculate sorting for all opportunities", + }, + "opportunities.check_all_url_validity": { + cron: "0 0,4,8,12,16,20 * * *", + class: "Opportunities::CheckAllURLValidityJob", + description: "Check the URL Validity of all opportunities", + }, + "resources.maintain": { + cron: "*/5 * * * *", + class: "Resources::MaintainJob", + description: "Maintain resources", + }, + "sources.calculate_counter_caches": { + cron: "*/10 * * * *", + class: "Sources::CalculateCounterCaches", + description: "Refresh counter caches on Sources", + }, + } + + config.good_job.dashboard_default_locale = :en +end + diff --git a/api/config/initializers/sidekiq_and_redis.rb b/api/config/initializers/sidekiq_and_redis.rb deleted file mode 100644 index e3b77c9096..0000000000 --- a/api/config/initializers/sidekiq_and_redis.rb +++ /dev/null @@ -1,9 +0,0 @@ -# frozen_string_literal: true - -Sidekiq.configure_server do |config| - config.redis = ManifoldEnv.redis.sidekiq_options -end - -Sidekiq.configure_client do |config| - config.redis = ManifoldEnv.redis.sidekiq_options -end diff --git a/api/config/initializers/zhong.rb b/api/config/initializers/zhong.rb deleted file mode 100644 
index a4affd86e7..0000000000 --- a/api/config/initializers/zhong.rb +++ /dev/null @@ -1,3 +0,0 @@ -# frozen_string_literal: true - -require "#{Rails.root}/zhong.rb" diff --git a/api/config/routes.rb b/api/config/routes.rb index b6fcf6e64e..ab93495378 100644 --- a/api/config/routes.rb +++ b/api/config/routes.rb @@ -1,8 +1,5 @@ # frozen_string_literal: true -require "sidekiq/web" -require "zhong/web" - Rails.application.routes.draw do concern :flaggable do resource :flags, controller: "/api/v1/flags", only: [:create, :destroy] do @@ -19,8 +16,7 @@ end constraints ->(request) { AuthConstraint.new(request).admin? || Rails.env.development? } do - mount Sidekiq::Web => "/api/sidekiq" - mount Zhong::Web, at: "/api/zhong" + mount GoodJob::Engine => '/api/good_job' end get "auth/:provider/callback", to: "oauth#authorize" diff --git a/api/db/migrate/20250603170620_create_good_jobs.rb b/api/db/migrate/20250603170620_create_good_jobs.rb new file mode 100644 index 0000000000..6cfd24455c --- /dev/null +++ b/api/db/migrate/20250603170620_create_good_jobs.rb @@ -0,0 +1,102 @@ +# frozen_string_literal: true + +class CreateGoodJobs < ActiveRecord::Migration[7.0] + def change + # Uncomment for Postgres v12 or earlier to enable gen_random_uuid() support + # enable_extension 'pgcrypto' + + create_table :good_jobs, id: :uuid do |t| + t.text :queue_name + t.integer :priority + t.jsonb :serialized_params + t.datetime :scheduled_at + t.datetime :performed_at + t.datetime :finished_at + t.text :error + + t.timestamps + + t.uuid :active_job_id + t.text :concurrency_key + t.text :cron_key + t.uuid :retried_good_job_id + t.datetime :cron_at + + t.uuid :batch_id + t.uuid :batch_callback_id + + t.boolean :is_discrete + t.integer :executions_count + t.text :job_class + t.integer :error_event, limit: 2 + t.text :labels, array: true + t.uuid :locked_by_id + t.datetime :locked_at + end + + create_table :good_job_batches, id: :uuid do |t| + t.timestamps + t.text :description + t.jsonb 
:serialized_properties + t.text :on_finish + t.text :on_success + t.text :on_discard + t.text :callback_queue_name + t.integer :callback_priority + t.datetime :enqueued_at + t.datetime :discarded_at + t.datetime :finished_at + end + + create_table :good_job_executions, id: :uuid do |t| + t.timestamps + + t.uuid :active_job_id, null: false + t.text :job_class + t.text :queue_name + t.jsonb :serialized_params + t.datetime :scheduled_at + t.datetime :finished_at + t.text :error + t.integer :error_event, limit: 2 + t.text :error_backtrace, array: true + t.uuid :process_id + t.interval :duration + end + + create_table :good_job_processes, id: :uuid do |t| + t.timestamps + t.jsonb :state + t.integer :lock_type, limit: 2 + end + + create_table :good_job_settings, id: :uuid do |t| + t.timestamps + t.text :key + t.jsonb :value + t.index :key, unique: true + end + + add_index :good_jobs, :scheduled_at, where: "(finished_at IS NULL)", name: :index_good_jobs_on_scheduled_at + add_index :good_jobs, [:queue_name, :scheduled_at], where: "(finished_at IS NULL)", name: :index_good_jobs_on_queue_name_and_scheduled_at + add_index :good_jobs, [:active_job_id, :created_at], name: :index_good_jobs_on_active_job_id_and_created_at + add_index :good_jobs, :concurrency_key, where: "(finished_at IS NULL)", name: :index_good_jobs_on_concurrency_key_when_unfinished + add_index :good_jobs, [:cron_key, :created_at], where: "(cron_key IS NOT NULL)", name: :index_good_jobs_on_cron_key_and_created_at_cond + add_index :good_jobs, [:cron_key, :cron_at], where: "(cron_key IS NOT NULL)", unique: true, name: :index_good_jobs_on_cron_key_and_cron_at_cond + add_index :good_jobs, [:finished_at], where: "retried_good_job_id IS NULL AND finished_at IS NOT NULL", name: :index_good_jobs_jobs_on_finished_at + add_index :good_jobs, [:priority, :created_at], order: { priority: "DESC NULLS LAST", created_at: :asc }, + where: "finished_at IS NULL", name: :index_good_jobs_jobs_on_priority_created_at_when_unfinished 
+ add_index :good_jobs, [:priority, :created_at], order: { priority: "ASC NULLS LAST", created_at: :asc }, + where: "finished_at IS NULL", name: :index_good_job_jobs_for_candidate_lookup + add_index :good_jobs, [:batch_id], where: "batch_id IS NOT NULL" + add_index :good_jobs, [:batch_callback_id], where: "batch_callback_id IS NOT NULL" + add_index :good_jobs, :labels, using: :gin, where: "(labels IS NOT NULL)", name: :index_good_jobs_on_labels + + add_index :good_job_executions, [:active_job_id, :created_at], name: :index_good_job_executions_on_active_job_id_and_created_at + add_index :good_jobs, [:priority, :scheduled_at], order: { priority: "ASC NULLS LAST", scheduled_at: :asc }, + where: "finished_at IS NULL AND locked_by_id IS NULL", name: :index_good_jobs_on_priority_scheduled_at_unfinished_unlocked + add_index :good_jobs, :locked_by_id, + where: "locked_by_id IS NOT NULL", name: "index_good_jobs_on_locked_by_id" + add_index :good_job_executions, [:process_id, :created_at], name: :index_good_job_executions_on_process_id_and_created_at + end +end diff --git a/api/db/structure.sql b/api/db/structure.sql index 090f84fbb1..f807b1c7a9 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1675,6 +1675,107 @@ CREATE SEQUENCE public.friendly_id_slugs_id_seq ALTER SEQUENCE public.friendly_id_slugs_id_seq OWNED BY public.friendly_id_slugs.id; +-- +-- Name: good_job_batches; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.good_job_batches ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL, + description text, + serialized_properties jsonb, + on_finish text, + on_success text, + on_discard text, + callback_queue_name text, + callback_priority integer, + enqueued_at timestamp(6) without time zone, + discarded_at timestamp(6) without time zone, + finished_at timestamp(6) without time zone +); + + +-- +-- Name: good_job_executions; Type: TABLE; 
Schema: public; Owner: - +-- + +CREATE TABLE public.good_job_executions ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL, + active_job_id uuid NOT NULL, + job_class text, + queue_name text, + serialized_params jsonb, + scheduled_at timestamp(6) without time zone, + finished_at timestamp(6) without time zone, + error text, + error_event smallint, + error_backtrace text[], + process_id uuid, + duration interval +); + + +-- +-- Name: good_job_processes; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.good_job_processes ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL, + state jsonb, + lock_type smallint +); + + +-- +-- Name: good_job_settings; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.good_job_settings ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL, + key text, + value jsonb +); + + +-- +-- Name: good_jobs; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.good_jobs ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + queue_name text, + priority integer, + serialized_params jsonb, + scheduled_at timestamp(6) without time zone, + performed_at timestamp(6) without time zone, + finished_at timestamp(6) without time zone, + error text, + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL, + active_job_id uuid, + concurrency_key text, + cron_key text, + retried_good_job_id uuid, + cron_at timestamp(6) without time zone, + batch_id uuid, + batch_callback_id uuid, + is_discrete boolean, + executions_count integer, + job_class text, + error_event smallint, + labels text[], + locked_by_id uuid, + locked_at timestamp(6) without time zone +); + + -- -- Name: 
identities; Type: TABLE; Schema: public; Owner: - -- @@ -3706,6 +3807,46 @@ ALTER TABLE ONLY public.friendly_id_slugs ADD CONSTRAINT friendly_id_slugs_pkey PRIMARY KEY (id); +-- +-- Name: good_job_batches good_job_batches_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.good_job_batches + ADD CONSTRAINT good_job_batches_pkey PRIMARY KEY (id); + + +-- +-- Name: good_job_executions good_job_executions_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.good_job_executions + ADD CONSTRAINT good_job_executions_pkey PRIMARY KEY (id); + + +-- +-- Name: good_job_processes good_job_processes_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.good_job_processes + ADD CONSTRAINT good_job_processes_pkey PRIMARY KEY (id); + + +-- +-- Name: good_job_settings good_job_settings_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.good_job_settings + ADD CONSTRAINT good_job_settings_pkey PRIMARY KEY (id); + + +-- +-- Name: good_jobs good_jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.good_jobs + ADD CONSTRAINT good_jobs_pkey PRIMARY KEY (id); + + -- -- Name: identities identities_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- @@ -4925,6 +5066,125 @@ CREATE INDEX index_friendly_id_slugs_on_sluggable_id ON public.friendly_id_slugs CREATE INDEX index_friendly_id_slugs_on_sluggable_type ON public.friendly_id_slugs USING btree (sluggable_type); +-- +-- Name: index_good_job_executions_on_active_job_id_and_created_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_job_executions_on_active_job_id_and_created_at ON public.good_job_executions USING btree (active_job_id, created_at); + + +-- +-- Name: index_good_job_executions_on_process_id_and_created_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_job_executions_on_process_id_and_created_at ON public.good_job_executions USING btree 
(process_id, created_at); + + +-- +-- Name: index_good_job_jobs_for_candidate_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_job_jobs_for_candidate_lookup ON public.good_jobs USING btree (priority, created_at) WHERE (finished_at IS NULL); + + +-- +-- Name: index_good_job_settings_on_key; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_good_job_settings_on_key ON public.good_job_settings USING btree (key); + + +-- +-- Name: index_good_jobs_jobs_on_finished_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_jobs_on_finished_at ON public.good_jobs USING btree (finished_at) WHERE ((retried_good_job_id IS NULL) AND (finished_at IS NOT NULL)); + + +-- +-- Name: index_good_jobs_jobs_on_priority_created_at_when_unfinished; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_jobs_on_priority_created_at_when_unfinished ON public.good_jobs USING btree (priority DESC NULLS LAST, created_at) WHERE (finished_at IS NULL); + + +-- +-- Name: index_good_jobs_on_active_job_id_and_created_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_active_job_id_and_created_at ON public.good_jobs USING btree (active_job_id, created_at); + + +-- +-- Name: index_good_jobs_on_batch_callback_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_batch_callback_id ON public.good_jobs USING btree (batch_callback_id) WHERE (batch_callback_id IS NOT NULL); + + +-- +-- Name: index_good_jobs_on_batch_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_batch_id ON public.good_jobs USING btree (batch_id) WHERE (batch_id IS NOT NULL); + + +-- +-- Name: index_good_jobs_on_concurrency_key_when_unfinished; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_concurrency_key_when_unfinished ON public.good_jobs USING btree (concurrency_key) WHERE (finished_at IS NULL); + + +-- +-- Name: 
index_good_jobs_on_cron_key_and_created_at_cond; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_cron_key_and_created_at_cond ON public.good_jobs USING btree (cron_key, created_at) WHERE (cron_key IS NOT NULL); + + +-- +-- Name: index_good_jobs_on_cron_key_and_cron_at_cond; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_good_jobs_on_cron_key_and_cron_at_cond ON public.good_jobs USING btree (cron_key, cron_at) WHERE (cron_key IS NOT NULL); + + +-- +-- Name: index_good_jobs_on_labels; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_labels ON public.good_jobs USING gin (labels) WHERE (labels IS NOT NULL); + + +-- +-- Name: index_good_jobs_on_locked_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_locked_by_id ON public.good_jobs USING btree (locked_by_id) WHERE (locked_by_id IS NOT NULL); + + +-- +-- Name: index_good_jobs_on_priority_scheduled_at_unfinished_unlocked; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_priority_scheduled_at_unfinished_unlocked ON public.good_jobs USING btree (priority, scheduled_at) WHERE ((finished_at IS NULL) AND (locked_by_id IS NULL)); + + +-- +-- Name: index_good_jobs_on_queue_name_and_scheduled_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_queue_name_and_scheduled_at ON public.good_jobs USING btree (queue_name, scheduled_at) WHERE (finished_at IS NULL); + + +-- +-- Name: index_good_jobs_on_scheduled_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_good_jobs_on_scheduled_at ON public.good_jobs USING btree (scheduled_at) WHERE (finished_at IS NULL); + + -- -- Name: index_identities_on_uid_and_provider; Type: INDEX; Schema: public; Owner: - -- @@ -7830,7 +8090,8 @@ INSERT INTO "schema_migrations" (version) VALUES ('20250521211043'), ('20250527180248'), ('20250528002025'), -('20250530205742'), +('20250530205742'), +('20250603170620'),
('20250603192547'), ('20250609191642'), ('20250609192241'), diff --git a/api/zhong.rb b/api/zhong.rb deleted file mode 100644 index 56bcd94f36..0000000000 --- a/api/zhong.rb +++ /dev/null @@ -1,66 +0,0 @@ -# frozen_string_literal: true - -require "./config/boot" -require "./config/environment" - -Zhong.redis = Redis.new(url: ENV["RAILS_REDIS_URL"]) - -Zhong.schedule do - category "caches" do - every(10.minutes, "refresh_all_flag_status_data") do - ::Flags::RefreshAllStatusDataJob.perform_later - end - - every(15.minutes, "refresh_project_collections") do - ::ProjectCollectionJobs::QueueCacheCollectionProjectsJob.perform_later - end - end - - category "entitlements" do - every(15.minutes, "audit") do - Entitlements::AuditJob.perform_later - end - - every(1.hour, "check_expiration") do - Entitlements::CheckExpirationJob.perform_later - end - end - - category "uploads" do - every(1.day, "expire_shrine_cache", at: "22:00", tz: "America/Los_Angeles") do - ExpireShrineCacheJob.perform_later - end - - every(1.day, "expire_tus_uploads", at: "23:00", tz: "America/Los_Angeles") do - ExpireTusUploadsJob.perform_later - end - end - - category "notification" do - every(1.day, "enqueue_user_daily_digests", at: "06:00") do - Notifications::EnqueueDigestsJob.perform_later "daily" - end - - every(1.week, "enqueue_user_weekly_digests", at: "Sunday 06:00") do - Notifications::EnqueueDigestsJob.perform_later "weekly" - end - end - - category "packaging" do - every(5.minutes, "automate_text_exports") do - Texts::AutomateExportsJob.perform_later - end - - every(1.day, "prune_text_exports", at: "01:00") do - TextExports::PruneJob.perform_later - end - - every(1.day, "prune_project_exports", at: "01:05") do - ProjectExports::PruneJob.perform_later - end - - every(4.hours, "prune_bag_it_temporary_directory") do - Packaging::BagItSpec::PruneTemporaryDirectoryJob.perform_later - end - end -end diff --git a/docker-compose.yml b/docker-compose.yml index 5735ea4991..562562d325 100644 --- 
a/docker-compose.yml +++ b/docker-compose.yml @@ -157,15 +157,13 @@ services: build: context: api dockerfile: docker/development/Dockerfile - command: "bin/sidekiq" + command: bin/good_job start --probe-port 7001 depends_on: postgres: condition: service_healthy migrations: condition: service_completed_successfully - minio-client: - condition: service_completed_successfully - redis: + web: condition: service_healthy env_file: - ./docker/manifold.env @@ -182,34 +180,12 @@ services: - rails-log:/srv/app/log - rails-tmp:/srv/app/tmp - uploads:/srv/app/public/system - clock: - build: - context: api - dockerfile: docker/development/Dockerfile - command: bin/zhong zhong.rb - depends_on: - postgres: - condition: service_healthy - migrations: - condition: service_completed_successfully - redis: - condition: service_healthy - env_file: - - ./docker/manifold.env - logging: - driver: json-file - options: - max-size: "10m" - max-file: "10" - restart: unless-stopped - volumes: - - ./api/public/system/:/srv/app/public/system - - ./api/:/srv/app - - bundle-cache:/bundle - - rails-data:/srv/app/data - - rails-log:/srv/app/log - - rails-tmp:/srv/app/tmp - - uploads:/srv/app/public/system + healthcheck: + test: ["CMD", "curl", "-f", "-s", "-o", "/dev/null", "http://localhost:7001/status/started"] + interval: 10s + timeout: 10s + retries: 3 + start_period: 15s spec: build: context: api From bba7826055d0c4a8171790f2c02ab2231bf0e625 Mon Sep 17 00:00:00 2001 From: Tim Standen Date: Tue, 3 Jun 2025 14:18:47 -0700 Subject: [PATCH 02/43] [F] Convert cron scheduler from zhong to good_job --- api/config/initializers/good_job.rb | 63 ++++++++++++++++++----------- 1 file changed, 40 insertions(+), 23 deletions(-) diff --git a/api/config/initializers/good_job.rb b/api/config/initializers/good_job.rb index 0eede40a78..ab61f38672 100644 --- a/api/config/initializers/good_job.rb +++ b/api/config/initializers/good_job.rb @@ -20,38 +20,55 @@ config.good_job.shutdown_timeout = 25 # seconds 
config.good_job.enable_cron = true config.good_job.cron = { - "feed.refresh_feed": { + "caches.refresh_project_collections": { + cron: "*/15 * * * *", + class: "::ProjectCollectionJobs::QueueCacheCollectionProjectsJob" + }, + "caches.refresh_all_flag_status_data": { + cron: "*/10 * * * *", + class: "::Flags::RefreshAllStatusDataJob" + }, + "entitlements.audit": { + cron: "*/15 * * * *", + class: "Entitlements::AuditJob" + }, + "entitlements.check_expiration": { cron: "0 * * * *", - class: "FeedRefreshJob", - description: "Refresh the opportunity feed.", + class: "Entitlements::CheckExpirationJob" }, - "opportunities.archive_expired": { - cron: "55 * * * *", - class: "Opportunities::ArchiveExpiredJob", - description: "Automatically archive expired opportunities", + "uploads.expire_shrine_cache": { + cron: "* * 0 * *", + class: "ExpireShrineCacheJob" }, - "opportunities.calculate_all_sorting": { - cron: "0 1,13 * * *", - class: "Opportunities::CalculateAllSortingJob", - description: "Calculate sorting for all opportunities", + "uploads.expire_tus_uploads": { + cron: "* * 0 * *", + class: "ExpireTusUploadsJob" }, - "opportunities.check_all_url_validity": { - cron: "0 0,4,8,12,16,20 * * *", - class: "Opportunities::CheckAllURLValidityJob", - description: "Check the URL Validity of all opportunities", + "notifications.enqueue_user_daily_digests": { + cron: "* 6 * * *", + class: "Notifications::EnqueueDigestsJob" }, - "resources.maintain": { + "notifications.enqueue_user_weekly_digests": { + cron: "* 6 * * 0", + class: "Notifications::EnqueueDigestsJob" + }, + "packaging.automate_text_exports": { cron: "*/5 * * * *", - class: "Resources::MaintainJob", - description: "Maintain resources", + class: "Texts::AutomateExportsJob" }, - "sources.calculate_counter_caches": { - cron: "*/10 * * * *", - class: "Sources::CalculateCounterCaches", - description: "Refresh counter caches on Sources", + "packaging.prune_text_exports": { + cron: "* 1 * * *", + class: 
"TextExports::PruneJob" }, + "packaging.prune_project_exports": { + cron: "5 1 * * *", + class: "ProjectExports::PruneJob" + }, + "packaging.prune_bag_it_temporary_directory": { + cron: "* */4 * * *", + class: "Packaging::BagItSpec::PruneTemporaryDirectoryJob" + } } config.good_job.dashboard_default_locale = :en end - From a7d3bf8dc5edef01c59e35edd67dd6ed0154f026 Mon Sep 17 00:00:00 2001 From: Tim Standen Date: Wed, 4 Jun 2025 09:15:55 -0700 Subject: [PATCH 03/43] [F] Update queues --- api/config/initializers/good_job.rb | 3 +-- api/config/sidekiq.yml | 7 ------- 2 files changed, 1 insertion(+), 9 deletions(-) delete mode 100644 api/config/sidekiq.yml diff --git a/api/config/initializers/good_job.rb b/api/config/initializers/good_job.rb index ab61f38672..4933bc8e05 100644 --- a/api/config/initializers/good_job.rb +++ b/api/config/initializers/good_job.rb @@ -5,8 +5,7 @@ config.good_job.smaller_number_is_higher_priority = true queues = [ - "maintenance:1", - "default,mailers,processing,sorting,url_validity:3", + "+default,mailers,deletions,low_priority,ahoy,annotations:10", ].join(?;) config.good_job.cleanup_preserved_jobs_before_seconds_ago = 43_200 # half-day diff --git a/api/config/sidekiq.yml b/api/config/sidekiq.yml deleted file mode 100644 index e156d892ed..0000000000 --- a/api/config/sidekiq.yml +++ /dev/null @@ -1,7 +0,0 @@ -:queues: - - default - - mailers - - deletions - - low_priority - - ahoy - - annotations From 9d6b135c7877f23d7831c85072e2693745f32c13 Mon Sep 17 00:00:00 2001 From: Tim Standen Date: Wed, 1 Oct 2025 14:52:35 -0700 Subject: [PATCH 04/43] [B] Fix minutes in some time schedules --- api/config/initializers/good_job.rb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/api/config/initializers/good_job.rb b/api/config/initializers/good_job.rb index 4933bc8e05..bb45d4d741 100644 --- a/api/config/initializers/good_job.rb +++ b/api/config/initializers/good_job.rb @@ -36,19 +36,19 @@ class: 
"Entitlements::CheckExpirationJob" }, "uploads.expire_shrine_cache": { - cron: "* * 0 * *", + cron: "0 22 * * *", class: "ExpireShrineCacheJob" }, "uploads.expire_tus_uploads": { - cron: "* * 0 * *", + cron: "0 23 * * *", class: "ExpireTusUploadsJob" }, "notifications.enqueue_user_daily_digests": { - cron: "* 6 * * *", + cron: "0 6 * * *", class: "Notifications::EnqueueDigestsJob" }, "notifications.enqueue_user_weekly_digests": { - cron: "* 6 * * 0", + cron: "0 6 * * 0", class: "Notifications::EnqueueDigestsJob" }, "packaging.automate_text_exports": { @@ -56,7 +56,7 @@ class: "Texts::AutomateExportsJob" }, "packaging.prune_text_exports": { - cron: "* 1 * * *", + cron: "0 1 * * *", class: "TextExports::PruneJob" }, "packaging.prune_project_exports": { @@ -64,7 +64,7 @@ class: "ProjectExports::PruneJob" }, "packaging.prune_bag_it_temporary_directory": { - cron: "* */4 * * *", + cron: "0 */4 * * *", class: "Packaging::BagItSpec::PruneTemporaryDirectoryJob" } } From 111e23082af31e31ac38e99ec9d3e6dc71681da2 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 13 Oct 2025 10:48:44 -0700 Subject: [PATCH 05/43] [C] Temporarily remove metadown gem - Metadown is often failing to load. 
Should find an alternative, or fork --- api/Gemfile | 2 +- api/Gemfile.lock | 9 --------- api/config/initializers/30_monkeypatches.rb | 2 +- 3 files changed, 2 insertions(+), 11 deletions(-) diff --git a/api/Gemfile b/api/Gemfile index 84e96606c1..49fecd5714 100644 --- a/api/Gemfile +++ b/api/Gemfile @@ -74,7 +74,7 @@ gem "lograge", "~> 0.10" gem "marcel", "~> 1.0.0" gem "maxminddb" gem "memoist", "~> 0.16.0" -gem "metadown", git: "https://github.com/steveklabnik/metadown.git", tag: "v1.1.0.beta" +# gem "metadown", git: "https://github.com/steveklabnik/metadown.git", tag: "v1.1.0.beta" gem "mime-types", "~> 3.3" gem "money", "~> 6.0" gem "namae", "~>1.0" diff --git a/api/Gemfile.lock b/api/Gemfile.lock index b57e2cf890..738cd3cfaf 100644 --- a/api/Gemfile.lock +++ b/api/Gemfile.lock @@ -6,14 +6,6 @@ GIT fast_jsonapi (1.6.0) activesupport (>= 4.2) -GIT - remote: https://github.com/steveklabnik/metadown.git - revision: 34b79ad7e0bdf560ce13e384caa1626569397798 - tag: v1.1.0.beta - specs: - metadown (1.1.0.beta) - redcarpet - GEM remote: https://rubygems.org/ specs: @@ -936,7 +928,6 @@ DEPENDENCIES marcel (~> 1.0.0) maxminddb memoist (~> 0.16.0) - metadown! 
mime-types (~> 3.3) money (~> 6.0) namae (~> 1.0) diff --git a/api/config/initializers/30_monkeypatches.rb b/api/config/initializers/30_monkeypatches.rb index baa466a024..342fdd88c2 100644 --- a/api/config/initializers/30_monkeypatches.rb +++ b/api/config/initializers/30_monkeypatches.rb @@ -1,7 +1,7 @@ # frozen_string_literal: true require Rails.root.join("lib", "patches", "better_interactions").to_s -require Rails.root.join("lib", "patches", "metadown_custom_renderer").to_s +# require Rails.root.join("lib", "patches", "metadown_custom_renderer").to_s require Rails.root.join("lib", "patches", "better_enums").to_s require Rails.root.join("lib", "patches", "for_shrine").to_s require Rails.root.join("lib", "patches", "friendly_id_uniqueness").to_s From ddd79516bfc7c9e97014f2fb5f0126a36161cc23 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 13 Oct 2025 10:49:05 -0700 Subject: [PATCH 06/43] [C] Replace Bitnami Redis in docker-compose; support REDIS_URL --- api/lib/manifold_env/redis_config.rb | 2 +- docker-compose.yml | 26 +++++++++++++++++++++----- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/api/lib/manifold_env/redis_config.rb b/api/lib/manifold_env/redis_config.rb index a830f6d8a8..ca45bedf2d 100644 --- a/api/lib/manifold_env/redis_config.rb +++ b/api/lib/manifold_env/redis_config.rb @@ -50,7 +50,7 @@ def default_namespace_prefix end def default_url - ENV["BOXEN_REDIS_URL"] || ENV["RAILS_REDIS_URL"] || "redis://127.0.0.1:6379" + ENV["REDIS_URL"] || ENV["BOXEN_REDIS_URL"] || ENV["RAILS_REDIS_URL"] || "redis://127.0.0.1:6379" end end end diff --git a/docker-compose.yml b/docker-compose.yml index 562562d325..9b1c7fe3bd 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -23,7 +23,7 @@ services: timeout: 5s retries: 5 redis: - image: bitnami/redis:6.2.7-debian-10-r34 + image: redis:6.2.7 platform: linux/amd64 environment: - "ALLOW_EMPTY_PASSWORD=yes" @@ -41,7 +41,7 @@ services: timeout: 5s retries: 5 test-redis: - image: 
bitnami/redis:6.2.7-debian-10-r34 + image: redis:6.2.7 platform: linux/amd64 environment: - "ALLOW_EMPTY_PASSWORD=yes" @@ -113,12 +113,27 @@ services: restart: "no" volumes: - ./docker/minio/client:/root/.mc + client: + tty: true + stdin_open: true + build: + context: client + target: prod + ports: + - "13100:13100" # Rescue + - "13101:13101" # SSR + - "13102:13102" # Webpack dev server + env_file: + - ./docker/local.env + restart: unless-stopped + volumes: + - ./client:/srv/app + - node_modules:/srv/app/node_modules web: tty: true stdin_open: true build: context: api - dockerfile: docker/development/Dockerfile depends_on: postgres: condition: service_healthy @@ -156,7 +171,6 @@ services: worker: build: context: api - dockerfile: docker/development/Dockerfile command: bin/good_job start --probe-port 7001 depends_on: postgres: @@ -189,7 +203,7 @@ services: spec: build: context: api - dockerfile: docker/development/Dockerfile + command: tail -f /dev/null depends_on: postgres: @@ -236,3 +250,5 @@ volumes: driver: local uploads: driver: local + node_modules: + driver: local \ No newline at end of file From fd66215a9bb7303574a9747a7774523aa585071f Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 13 Oct 2025 10:50:14 -0700 Subject: [PATCH 07/43] [C] Add basic prod API Dockerfile --- api/Dockerfile | 69 ++++++++++++++++++++++++++++++++++++++++++++++++ api/bin/good_job | 0 2 files changed, 69 insertions(+) create mode 100644 api/Dockerfile mode change 100644 => 100755 api/bin/good_job diff --git a/api/Dockerfile b/api/Dockerfile new file mode 100644 index 0000000000..b16c97c0d7 --- /dev/null +++ b/api/Dockerfile @@ -0,0 +1,69 @@ +FROM ruby:3.2-bullseye AS base + +RUN mkdir -pv /bundle/bin + +ENV BUNDLE_PATH=/bundle \ + BUNDLE_BIN=/bundle/bin \ + GEM_HOME=/bundle \ + RACK_ENV=development \ + RAILS_ENV=development \ + RAILS_LOG_TO_STDOUT=true \ + RAILS_SERVE_STATIC_FILES=true \ + RAILS_MIN_THREADS=16 \ + RAILS_MAX_THREADS=16 \ + WORKER_COUNT=0 + +COPY 
docker/install_node_16.sh /usr/local/src/install_node_16.sh + +RUN bash /usr/local/src/install_node_16.sh + +RUN apt-get update -qq && apt-get install -y -qq --no-install-recommends \ + build-essential \ + ca-certificates \ + curl \ + ghostscript \ + gnupg gnupg2 \ + imagemagick \ + libglib2.0-0 libglib2.0-dev \ + libicu-dev \ + libjemalloc2 \ + libpoppler-glib8 \ + librsvg2-bin \ + libsndfile1-dev \ + libvips \ + libvips-dev \ + mediainfo \ + nodejs \ + postgresql-common \ + pandoc + +RUN DEBIAN_FRONTEND=noninteractive /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y + +RUN apt-get update -qq && apt-get install -y -qq --no-install-recommends postgresql-client-13 + +RUN npm install -g mammoth@^1.4.16 wscat@^6.0.1 + +RUN sed -i '//d' /etc/ImageMagick-6/policy.xml + +COPY ./ /srv/app/ + +WORKDIR /srv/app +COPY Gemfile /srv/app/Gemfile +COPY Gemfile.lock /srv/app/Gemfile.lock + +COPY docker/entrypoint.sh /usr/bin/ +RUN chmod +x /usr/bin/entrypoint.sh +ENTRYPOINT ["entrypoint.sh"] + +ENV MAMMOTH_PATH=/usr/bin/mammoth +ENV PATH="${BUNDLE_BIN}:${PATH}" +ENV LD_PRELOAD=libjemalloc.so.2 + +RUN bundle install + +CMD ["bin/puma", "-C", "config/puma.rb"] + +# There are currently no differences between dev and prod Dockerfiles, but these are here to provide parity with the client Dockerfile +FROM base AS dev + +FROM base AS prod \ No newline at end of file diff --git a/api/bin/good_job b/api/bin/good_job old mode 100644 new mode 100755 From bcfefdca6a57b40b0727c73eb137f206dfad2100 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 13 Oct 2025 10:51:09 -0700 Subject: [PATCH 08/43] [C] Add basic prod client Dockerfile; fix starting client in docker --- client/Dockerfile | 20 ++++++++++++++++++++ client/script/build-browser-config.js | 5 +++-- client/src/config/environment/index.js | 2 +- 3 files changed, 24 insertions(+), 3 deletions(-) create mode 100644 client/Dockerfile diff --git a/client/Dockerfile b/client/Dockerfile new file mode 100644 index 
0000000000..71660125db --- /dev/null +++ b/client/Dockerfile @@ -0,0 +1,20 @@ +FROM node:16.20.2 AS base + +WORKDIR /srv/app +COPY ./ /srv/app + +RUN yarn + +FROM base AS dev + + RUN yarn build:dev + + EXPOSE 3010 3011 3012 + + CMD ["yarn", "run", "watch"] + +FROM base AS prod + + RUN yarn build:prod + + CMD ["yarn", "run", "start-docker"] \ No newline at end of file diff --git a/client/script/build-browser-config.js b/client/script/build-browser-config.js index 7499a803bd..51489a9514 100644 --- a/client/script/build-browser-config.js +++ b/client/script/build-browser-config.js @@ -10,8 +10,9 @@ const output = compileEnv(template); const writePath = `${paths.build}/www/`; /* eslint-disable no-console */ -mkdirp(writePath, function writeConfig(err) { - if (err) return console.error("Unable to mkdir at " + writePath); +mkdirp(writePath).then(_ => { fs.writeFileSync(`${paths.build}/www/browser.config.js`, output); +}).catch(_ => { + console.error("Unable to mkdir at " + writePath + ": " + err); }); /* eslint-enable no-console */ diff --git a/client/src/config/environment/index.js b/client/src/config/environment/index.js index db9c4e4430..c7cfcfd221 100644 --- a/client/src/config/environment/index.js +++ b/client/src/config/environment/index.js @@ -1,6 +1,6 @@ const isServer = typeof __SERVER__ === "undefined" || __SERVER__; const isBrowser = !isServer; -const name = process.env.NODE_ENV.toLowerCase() || "development"; +const name = process.env.NODE_ENV?.toLowerCase() || "development"; const skipSSR = process.env.SKIP_SSR || false; const baseConfig = { From 37ef697ac2ce519ef2c27ac1f664a8de3a5410cf Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 13 Oct 2025 10:52:26 -0700 Subject: [PATCH 09/43] [C] Temporarily remove rate limit exception for client - This must be re-added, but has to support internal hostname --- api/config/initializers/rack_attack.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/config/initializers/rack_attack.rb 
b/api/config/initializers/rack_attack.rb index 75c82b6af1..366bd5a8fe 100644 --- a/api/config/initializers/rack_attack.rb +++ b/api/config/initializers/rack_attack.rb @@ -4,7 +4,7 @@ # We want to ensure that the public IP used by the client is never # accidentally blocklisted or throttled. unless Rails.env.development? || Rails.env.test? - ManifoldEnv.rate_limiting.derive_public_ips! Rails.application.config.manifold.domain + # ManifoldEnv.rate_limiting.derive_public_ips! Rails.application.config.manifold.domain end ManifoldEnv.rate_limiting.public_ips.each do |public_ip| From 7693c4c77edd54404cb377d431e573708c321c2d Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 13 Oct 2025 10:53:36 -0700 Subject: [PATCH 10/43] [E] Make db thread pool configurable --- api/config/database.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/api/config/database.yml b/api/config/database.yml index 75b3e46915..9bb75a86a1 100644 --- a/api/config/database.yml +++ b/api/config/database.yml @@ -1,19 +1,17 @@ <% - user = ENV["RAILS_DB_USER"] pass = ENV["RAILS_DB_PASS"] host = ENV["RAILS_DB_HOST"] || 'localhost' port = ENV["RAILS_DB_PORT"] || 5432 db = ENV["RAILS_DB_NAME"] || 'manifold_development' test_db = ENV["RAILS_TEST_DB_NAME"] || 'manifold_test' - %> common: &common adapter: postgresql host: <%= host %> encoding: unicode - pool: 50 + pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 50 } %> port: <%= port %> user: <%= user %> database: <%= db %> From 90ad9dbdc77162654047a72d0bf92b81ab42237d Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 13 Oct 2025 10:54:06 -0700 Subject: [PATCH 11/43] [B] Require tus/s3 in storage/factory --- api/db/structure.sql | 168 +++++++++++++++++-------------------- api/lib/storage/factory.rb | 2 + docker/local.env | 4 +- docker/manifold.env | 1 + 4 files changed, 83 insertions(+), 92 deletions(-) diff --git a/api/db/structure.sql b/api/db/structure.sql index f807b1c7a9..170bd410ad 100644 --- a/api/db/structure.sql +++ 
b/api/db/structure.sql @@ -397,7 +397,7 @@ CREATE TABLE public.action_callouts ( -- CREATE TABLE public.analytics_events ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, visit_id uuid NOT NULL, name character varying NOT NULL, properties jsonb DEFAULT '{}'::jsonb NOT NULL, @@ -410,7 +410,7 @@ CREATE TABLE public.analytics_events ( -- CREATE TABLE public.analytics_visits ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, visit_token character varying NOT NULL, visitor_token character varying NOT NULL, ip character varying, @@ -503,7 +503,7 @@ CREATE TABLE public.comments ( -- CREATE TABLE public.reading_group_memberships ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, user_id uuid, reading_group_id uuid, created_at timestamp without time zone NOT NULL, @@ -646,8 +646,8 @@ CREATE VIEW public.annotation_reading_group_memberships AS CREATE TABLE public.ar_internal_metadata ( key character varying NOT NULL, value character varying, - created_at timestamp(6) without time zone NOT NULL, - updated_at timestamp(6) without time zone NOT NULL + created_at timestamp without time zone NOT NULL, + updated_at timestamp without time zone NOT NULL ); @@ -656,7 +656,7 @@ CREATE TABLE public.ar_internal_metadata ( -- CREATE TABLE public.cached_external_source_links ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, cached_external_source_id uuid NOT NULL, text_id uuid NOT NULL, created_at timestamp without time zone NOT NULL, @@ -669,7 +669,7 @@ CREATE TABLE public.cached_external_source_links ( -- CREATE TABLE public.cached_external_sources ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, url text NOT NULL, source_identifier text NOT NULL, kind text DEFAULT 'unknown'::text NOT NULL, @@ -979,7 +979,7 @@ CREATE TABLE 
public.content_blocks ( -- CREATE TABLE public.entitlement_roles ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, name text NOT NULL, kind text DEFAULT 'unknown'::text NOT NULL, created_at timestamp without time zone NOT NULL, @@ -1049,7 +1049,7 @@ CREATE TABLE public.entitlement_transitions ( -- CREATE TABLE public.entitlement_user_links ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, entitlement_id uuid NOT NULL, user_id uuid NOT NULL, created_at timestamp without time zone NOT NULL, @@ -1062,7 +1062,7 @@ CREATE TABLE public.entitlement_user_links ( -- CREATE TABLE public.entitlements ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, target_type character varying NOT NULL, target_id uuid NOT NULL, entitler_id uuid NOT NULL, @@ -1086,14 +1086,14 @@ CREATE TABLE public.entitlements ( -- CREATE TABLE public.journal_issues ( - id uuid DEFAULT gen_random_uuid() NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL, journal_id uuid NOT NULL, journal_volume_id uuid, creator_id uuid, - fa_cache jsonb DEFAULT '{}'::jsonb NOT NULL, number character varying DEFAULT ''::character varying NOT NULL, + fa_cache jsonb DEFAULT '{}'::jsonb NOT NULL, sort_title integer DEFAULT 0 NOT NULL, pending_sort_title integer ); @@ -1104,9 +1104,9 @@ CREATE TABLE public.journal_issues ( -- CREATE TABLE public.journal_volumes ( - id uuid DEFAULT gen_random_uuid() NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + id uuid 
DEFAULT public.gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL, journal_id uuid NOT NULL, creator_id uuid, number integer, @@ -1277,7 +1277,7 @@ CREATE MATERIALIZED VIEW public.entitlement_grant_audits AS -- CREATE TABLE public.entitlement_import_row_transitions ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, to_state character varying NOT NULL, metadata jsonb DEFAULT '{}'::jsonb, sort_key integer NOT NULL, @@ -1293,7 +1293,7 @@ CREATE TABLE public.entitlement_import_row_transitions ( -- CREATE TABLE public.entitlement_import_rows ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, entitlement_import_id uuid NOT NULL, entitlement_id uuid, subject_type character varying, @@ -1304,8 +1304,8 @@ CREATE TABLE public.entitlement_import_rows ( email public.citext, expires_on date, messages text[] DEFAULT '{}'::text[] NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL, pending_entitlement_id uuid, expiration text, first_name text, @@ -1318,7 +1318,7 @@ CREATE TABLE public.entitlement_import_rows ( -- CREATE TABLE public.entitlement_import_transitions ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, to_state character varying NOT NULL, metadata jsonb DEFAULT '{}'::jsonb, sort_key integer NOT NULL, @@ -1334,14 +1334,14 @@ CREATE TABLE public.entitlement_import_transitions ( -- CREATE TABLE public.entitlement_imports ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, creator_id uuid, name public.citext NOT NULL, 
file_data jsonb, entitlement_import_rows_count bigint DEFAULT 0 NOT NULL, messages text[] DEFAULT '{}'::text[] NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -1350,7 +1350,7 @@ CREATE TABLE public.entitlement_imports ( -- CREATE TABLE public.reading_groups ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, name character varying, privacy character varying DEFAULT 'private'::character varying, invitation_code character varying, @@ -1458,7 +1458,7 @@ ALTER SEQUENCE public.entitlement_transitions_id_seq OWNED BY public.entitlement -- CREATE TABLE public.entitlers ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, entity_type character varying NOT NULL, entity_id uuid NOT NULL, name text NOT NULL, @@ -1501,7 +1501,7 @@ CREATE TABLE public.events ( -- CREATE TABLE public.export_targets ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, strategy text DEFAULT 'unknown'::text NOT NULL, name text NOT NULL, slug text NOT NULL, @@ -1516,7 +1516,7 @@ CREATE TABLE public.export_targets ( -- CREATE TABLE public.user_collected_composite_entries ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, user_id uuid NOT NULL, collectable_type character varying NOT NULL, collectable_id uuid NOT NULL, @@ -1660,7 +1660,6 @@ CREATE TABLE public.friendly_id_slugs ( -- CREATE SEQUENCE public.friendly_id_slugs_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -1898,8 +1897,8 @@ CREATE TABLE public.ingestions ( CREATE TABLE public.journal_subjects ( id bigint NOT NULL, - created_at timestamp(6) without time zone DEFAULT 
CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL, journal_id uuid NOT NULL, subject_id uuid NOT NULL ); @@ -1929,9 +1928,9 @@ ALTER SEQUENCE public.journal_subjects_id_seq OWNED BY public.journal_subjects.i -- CREATE TABLE public.journals ( - id uuid DEFAULT gen_random_uuid() NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL, title character varying, subtitle character varying, description text, @@ -2045,7 +2044,6 @@ CREATE TABLE public.pages ( -- CREATE SEQUENCE public.pages_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2065,14 +2063,14 @@ ALTER SEQUENCE public.pages_id_seq OWNED BY public.pages.id; -- CREATE TABLE public.pending_entitlement_transitions ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, pending_entitlement_id uuid NOT NULL, most_recent boolean NOT NULL, sort_key integer NOT NULL, to_state character varying NOT NULL, metadata jsonb, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -2081,7 +2079,7 @@ CREATE TABLE public.pending_entitlement_transitions ( -- CREATE TABLE public.pending_entitlements ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, creator_id uuid, entitlement_id uuid, user_id 
uuid, @@ -2094,8 +2092,8 @@ CREATE TABLE public.pending_entitlements ( first_name text, last_name text, messages text[] DEFAULT '{}'::text[] NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -2162,7 +2160,6 @@ CREATE TABLE public.project_collection_subjects ( -- CREATE SEQUENCE public.project_collection_subjects_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2182,7 +2179,7 @@ ALTER SEQUENCE public.project_collection_subjects_id_seq OWNED BY public.project -- CREATE TABLE public.project_exports ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, project_id uuid NOT NULL, export_kind text DEFAULT 'unknown'::text NOT NULL, fingerprint text NOT NULL, @@ -2218,7 +2215,7 @@ CREATE VIEW public.project_export_statuses AS -- CREATE TABLE public.project_exportation_transitions ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, to_state text NOT NULL, metadata jsonb DEFAULT '{}'::jsonb, sort_key integer NOT NULL, @@ -2234,7 +2231,7 @@ CREATE TABLE public.project_exportation_transitions ( -- CREATE TABLE public.project_exportations ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, project_id uuid NOT NULL, export_target_id uuid NOT NULL, project_export_id uuid, @@ -2262,7 +2259,6 @@ CREATE TABLE public.project_subjects ( -- CREATE SEQUENCE public.project_subjects_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2312,7 +2308,7 @@ CREATE VIEW public.project_summaries AS -- CREATE TABLE public.reading_group_categories ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, reading_group_id uuid NOT 
NULL, "position" integer, title text NOT NULL, @@ -2330,7 +2326,7 @@ CREATE TABLE public.reading_group_categories ( -- CREATE TABLE public.reading_group_composite_entries ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, collectable_type character varying NOT NULL, collectable_id uuid NOT NULL, @@ -2391,7 +2387,7 @@ CREATE VIEW public.reading_group_collections AS -- CREATE TABLE public.reading_group_projects ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, project_id uuid NOT NULL, reading_group_category_id uuid, @@ -2406,7 +2402,7 @@ CREATE TABLE public.reading_group_projects ( -- CREATE TABLE public.reading_group_resource_collections ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, resource_collection_id uuid NOT NULL, reading_group_category_id uuid, @@ -2421,7 +2417,7 @@ CREATE TABLE public.reading_group_resource_collections ( -- CREATE TABLE public.reading_group_resources ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, resource_id uuid NOT NULL, reading_group_category_id uuid, @@ -2436,7 +2432,7 @@ CREATE TABLE public.reading_group_resources ( -- CREATE TABLE public.reading_group_texts ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, text_id uuid NOT NULL, reading_group_category_id uuid, @@ -2492,13 +2488,13 @@ CREATE VIEW public.reading_group_counts AS -- CREATE TABLE public.reading_group_journal_issues ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, journal_issue_id uuid NOT NULL, reading_group_category_id uuid, "position" integer, - created_at timestamp(6) 
without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -2507,11 +2503,11 @@ CREATE TABLE public.reading_group_journal_issues ( -- CREATE TABLE public.reading_group_kinds ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, name text NOT NULL, slug text NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -2538,13 +2534,13 @@ CREATE VIEW public.reading_group_membership_counts AS -- CREATE TABLE public.reading_group_text_sections ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, text_section_id uuid NOT NULL, reading_group_category_id uuid, "position" integer, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -2632,7 +2628,6 @@ CREATE TABLE public.resource_import_row_transitions ( -- CREATE SEQUENCE public.resource_import_row_transitions_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2686,7 +2681,6 @@ CREATE TABLE public.resource_import_transitions ( -- CREATE SEQUENCE public.resource_import_transitions_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2855,7 +2849,6 @@ CREATE TABLE public.settings ( -- CREATE SEQUENCE public.settings_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ 
-2909,7 +2902,7 @@ CREATE TABLE public.subjects ( -- CREATE TABLE public.system_entitlements ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, kind text NOT NULL, created_at timestamp without time zone NOT NULL, updated_at timestamp without time zone NOT NULL @@ -2937,7 +2930,6 @@ CREATE TABLE public.taggings ( -- CREATE SEQUENCE public.taggings_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2968,7 +2960,6 @@ CREATE TABLE public.tags ( -- CREATE SEQUENCE public.tags_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2988,7 +2979,7 @@ ALTER SEQUENCE public.tags_id_seq OWNED BY public.tags.id; -- CREATE TABLE public.text_exports ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, text_id uuid NOT NULL, export_kind text DEFAULT 'unknown'::text NOT NULL, fingerprint text NOT NULL, @@ -3107,7 +3098,6 @@ CREATE TABLE public.text_section_stylesheets ( -- CREATE SEQUENCE public.text_section_stylesheets_id_seq - AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -3302,11 +3292,11 @@ CREATE TABLE public.upgrade_results ( -- CREATE TABLE public.user_collected_journal_issues ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, user_id uuid NOT NULL, journal_issue_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -3315,11 +3305,11 @@ CREATE TABLE public.user_collected_journal_issues ( -- CREATE TABLE public.user_collected_projects ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, user_id uuid NOT NULL, project_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT 
CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -3328,11 +3318,11 @@ CREATE TABLE public.user_collected_projects ( -- CREATE TABLE public.user_collected_resource_collections ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, user_id uuid NOT NULL, resource_collection_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -3341,11 +3331,11 @@ CREATE TABLE public.user_collected_resource_collections ( -- CREATE TABLE public.user_collected_resources ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, user_id uuid NOT NULL, resource_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -3354,11 +3344,11 @@ CREATE TABLE public.user_collected_resources ( -- CREATE TABLE public.user_collected_text_sections ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, user_id uuid NOT NULL, text_section_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -3367,11 
+3357,11 @@ CREATE TABLE public.user_collected_text_sections ( -- CREATE TABLE public.user_collected_texts ( - id uuid DEFAULT gen_random_uuid() NOT NULL, + id uuid DEFAULT public.gen_random_uuid() NOT NULL, user_id uuid NOT NULL, text_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp(6) without time zone DEFAULT now() NOT NULL, + updated_at timestamp(6) without time zone DEFAULT now() NOT NULL ); @@ -8090,7 +8080,7 @@ INSERT INTO "schema_migrations" (version) VALUES ('20250521211043'), ('20250527180248'), ('20250528002025'), -('20250530205742') +('20250530205742'), ('20250603170620'), ('20250603192547'), ('20250609191642'), diff --git a/api/lib/storage/factory.rb b/api/lib/storage/factory.rb index c4ae8cf363..55898b3727 100644 --- a/api/lib/storage/factory.rb +++ b/api/lib/storage/factory.rb @@ -6,6 +6,8 @@ require "shrine/storage/google_cloud_storage" require "shrine/storage/s3" +require "tus/storage/s3" + require_relative "types" require_relative "strategy" require_relative "tus_gcs" diff --git a/docker/local.env b/docker/local.env index f978031c15..cffafee94e 100644 --- a/docker/local.env +++ b/docker/local.env @@ -13,9 +13,7 @@ CLIENT_URL=http://localhost:13100 CLIENT_BROWSER_API_URL=http://localhost:13110 CLIENT_BROWSER_API_CABLE_URL=http://localhost:13120 -CLIENT_SERVER_API_URL=http://localhost:13110 - -ELASTICSEARCH_URL=http://elasticsearch:9200 +CLIENT_SERVER_API_URL=http://web:13110 CLIENT_SERVER_PROXIES=true diff --git a/docker/manifold.env b/docker/manifold.env index 9b77854204..9c367fbca6 100644 --- a/docker/manifold.env +++ b/docker/manifold.env @@ -1,5 +1,6 @@ ########################################################################################## # Manifold Service Configuration +# For Development Only ########################################################################################## 
DOMAIN=127.0.0.1:13100 From 31700246085291f31da55a142a6ccbd54a845985 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 15 Oct 2025 09:58:30 -0700 Subject: [PATCH 12/43] [C] Whitelist DO internal IP ranges in Rack Attack - Fetching client public IP is still disabled. This will need to be conditionally supported. --- api/config/initializers/rack_attack.rb | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/api/config/initializers/rack_attack.rb b/api/config/initializers/rack_attack.rb index 366bd5a8fe..8ae08481f1 100644 --- a/api/config/initializers/rack_attack.rb +++ b/api/config/initializers/rack_attack.rb @@ -1,5 +1,12 @@ # frozen_string_literal: true +INTERNAL_IP_RANGES = [ + "10.229.0.0/16", + "10.244.0.0/16", + "10.245.0.0/16", + "10.246.0.0/16" +] + # :nocov: # We want to ensure that the public IP used by the client is never # accidentally blocklisted or throttled. @@ -7,6 +14,8 @@ # ManifoldEnv.rate_limiting.derive_public_ips! Rails.application.config.manifold.domain end +INTERNAL_IP_RANGES.each { |ip| Rack::Attack.safelist_ip(ip) } + ManifoldEnv.rate_limiting.public_ips.each do |public_ip| Rack::Attack.safelist_ip public_ip end From 17e23158428eae7930bca376de00522204c26096 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 15 Oct 2025 10:00:05 -0700 Subject: [PATCH 13/43] [E] Improve S3 support - Improves passing bucket/prefix args - Makes all ACLs public; this may need to be adjusted later --- api/app/uploaders/concerns/shared_uploader.rb | 11 +++++++++-- api/db/structure.sql | 7 +++++++ api/lib/storage/factory.rb | 16 +++++++++++++++- docker-compose.yml | 2 +- docker/manifold.env | 4 +--- 5 files changed, 33 insertions(+), 7 deletions(-) diff --git a/api/app/uploaders/concerns/shared_uploader.rb b/api/app/uploaders/concerns/shared_uploader.rb index 2a5e7589f4..0fe33eac33 100644 --- a/api/app/uploaders/concerns/shared_uploader.rb +++ b/api/app/uploaders/concerns/shared_uploader.rb @@ -22,12 +22,19 @@ module SharedUploader included do plugin 
:add_metadata - plugin :url_options, cache: URL_OPTIONS, store: URL_OPTIONS + plugin :url_options, cache: Storage::Factory.url_options, store: Storage::Factory.url_options + + upload_options = { cache: { acl: "public-read" }, store: { acl: "public-read" } } # In our specs we often attach files from fixtures. If we let Shrine move them, they're # not available for other specs. Our import services also attach files from the file system, # which should remain in place after the import runs. Until we sort out these issues, we # should copy rather than move files into the cache. - plugin :upload_options, cache: { move: false }, store: { move: true } if Storage::Factory.store_supports_move? + if Storage::Factory.store_supports_move? + upload_options[:cache][:move] = false + upload_options[:store][:move] = true + end + plugin :upload_options, **upload_options + plugin :pretty_location add_metadata :sha256 do |io, _context| diff --git a/api/db/structure.sql b/api/db/structure.sql index 170bd410ad..a660678a58 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,3 +1,8 @@ +\restrict mRJh1bEMqIKlORifWLRPjref0qy1FtwUYDQiKi0d19Z2JfhrL9GFesgbZeG4R02 + +-- Dumped from database version 13.22 +-- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) + SET statement_timeout = 0; SET lock_timeout = 0; SET idle_in_transaction_session_timeout = 0; @@ -7728,6 +7733,8 @@ ALTER TABLE ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- +\unrestrict mRJh1bEMqIKlORifWLRPjref0qy1FtwUYDQiKi0d19Z2JfhrL9GFesgbZeG4R02 + SET search_path TO "$user", public; INSERT INTO "schema_migrations" (version) VALUES diff --git a/api/lib/storage/factory.rb b/api/lib/storage/factory.rb index 55898b3727..ce8e18b346 100644 --- a/api/lib/storage/factory.rb +++ b/api/lib/storage/factory.rb @@ -168,6 +168,12 @@ def tus_server_s3_storage ) end + def url_options + { + host: asset_host + }.compact + end + private def file_storage(path, prefix) @@ -198,7 +204,7 @@ def 
store_s3_options end def s3_storage(bucket, prefix) - Shrine::Storage::S3.new(bucket:, **store_s3_options) + Shrine::Storage::S3.new(**store_s3_options, **{ bucket:, prefix: }.compact, public: true) end def test_storage(path, prefix) @@ -208,6 +214,14 @@ def test_storage(path, prefix) def test? Rails.env.test? end + + def asset_host + if primary_store.file? + Rails.configuration.manifold.api_url&.sub(%r{/\z}, "") || "" + else + UploadConfig.asset_host || S3Config.endpoint + end + end end end end diff --git a/docker-compose.yml b/docker-compose.yml index 9b1c7fe3bd..fcf1a15a97 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -118,7 +118,7 @@ services: stdin_open: true build: context: client - target: prod + target: dev ports: - "13100:13100" # Rescue - "13101:13101" # SSR diff --git a/docker/manifold.env b/docker/manifold.env index 9c367fbca6..336dc63aff 100644 --- a/docker/manifold.env +++ b/docker/manifold.env @@ -39,6 +39,4 @@ PGDATABASE=manifold_development S3_ENDPOINT=http://minio:9000 S3_ACCESS_KEY_ID=minio S3_SECRET_ACCESS_KEY=minio123 -UPLOAD_BUCKET=manifold-storage - -MANIFOLD_SETTINGS_STORAGE_MIRROR=s3 +UPLOAD_BUCKET=manifold-storage \ No newline at end of file From 0ecef1bf369865471c9605a99578dc9b466978ad Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 15 Oct 2025 10:03:28 -0700 Subject: [PATCH 14/43] [C] Add necessary middleware for Good Job dashboard - This should be isolated to just the necessary routes --- api/config/application.rb | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/api/config/application.rb b/api/config/application.rb index 552e2b06d2..e7bcdd78fd 100644 --- a/api/config/application.rb +++ b/api/config/application.rb @@ -83,6 +83,11 @@ class Application < Rails::Application # Skip views, helpers and assets when generating a new resource. 
config.api_only = true + config.middleware.use Rack::MethodOverride + config.middleware.use ActionDispatch::Flash + config.middleware.use ActionDispatch::Cookies + config.middleware.use ActionDispatch::Session::CookieStore + config.eager_load_paths += [ "#{config.root}/app/jobs", "#{config.root}/app/models", From c8d5f5d61e51571577ab81b96f299281a88ebf42 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 15 Oct 2025 10:04:27 -0700 Subject: [PATCH 15/43] [F] Basic CLI tool for migration setup --- api/.gitignore | 2 + api/config/puma.rb | 7 +- api/lib/tasks/manifold_migrator.thor | 340 +++++++++++++++++++++++++++ 3 files changed, 347 insertions(+), 2 deletions(-) create mode 100644 api/lib/tasks/manifold_migrator.thor diff --git a/api/.gitignore b/api/.gitignore index 4930d183c8..906d344d27 100644 --- a/api/.gitignore +++ b/api/.gitignore @@ -40,3 +40,5 @@ backup* /config/*.local.yml /config/credentials/local.* + +.env \ No newline at end of file diff --git a/api/config/puma.rb b/api/config/puma.rb index 7261e89bbd..ba38a6f88c 100644 --- a/api/config/puma.rb +++ b/api/config/puma.rb @@ -38,8 +38,11 @@ is_development ? 16 : 6 end -pidfile pidfile_path -state_path state_path +if listen_on_socket + pidfile pidfile_path + state_path state_path +end + tag "manifold-#{application}" environment rails_environment workers number_of_workers diff --git a/api/lib/tasks/manifold_migrator.thor b/api/lib/tasks/manifold_migrator.thor new file mode 100644 index 0000000000..84d740f3b9 --- /dev/null +++ b/api/lib/tasks/manifold_migrator.thor @@ -0,0 +1,340 @@ +require_relative "../../config/environment" + +class ManifoldMigrator < Thor + ENV_FILE_OPTIONS = [ + "~/environment/01_unmanaged.env", + Rails.root.join(".env").to_s + ] + + desc "migrate_uploads", "Dewit" + def migrate_uploads + MirrorUploads.dewit + end + + desc "setup", "Set up this Manifold instance for asset migration" + def setup + say cool_manifold_logo, :green + + say <<~TEXT, :magenta, true + Hello! 
Welcome to the Manifold Migrator CLI Tool! + + TEXT + + sleep 3 + + say <<~TEXT, :magenta + This tool will help you prepare to migrate your Manifold instance from a bare-metal installation (i.e. .deb package) + to a modern Dockerized deployment. + + TEXT + + say process_outline + + sleep 3 + + @domain = ask("But first, what will be the host name of your new Manifold instance? (Default: #{ENV["DOMAIN"]})").presence || ENV["DOMAIN"] + + say "Step #1: Clone uploads to a cloud service", :bold + say "-----------------------------------------\n\n" + + sleep 2 + + @storage = Storage::Factory + + if @storage.primary_store_file? + say <<~TEXT, :bold + Your Manifold instance is currently using local file storage. This is not supported in a Dockerized hosting environment. + + TEXT + + setup_s3 + elsif @storage.primary_store_cloud? + setup_s3 if no? <<~TEXT, :bold + Looks like you're already using a cloud-hosted bucket for uploads. Great! Would you like to continue using this service + for your new Manifold instance? (y/n) + TEXT + end + + say "Step #2: Environment Variables", :bold + say "------------------------------\n\n" + + say <<~TEXT + Copy the following environment variables into your new hosting provider. + + TEXT + + say <<~TEXT, :yellow + NOTE: This list may not be complete! Check your environment and configuration carefully. + + TEXT + + say transition_env_vars + + rescue Interrupt + say "Ok I love you bye bye!", :red + exit 1 + end + + private + + def continue?(message = "Do you want to continue? (y/n)", color = :bold, on_exit = "Exiting. Hope to see you soon!", on_exit_color = :red) + if no? message, color + say(on_exit, on_exit_color) + exit 0 + else + say "\n" + end + end + + def setup_s3 + say <<~TEXT + The first step in this migration is to ensure all uploaded assets are moved to a S3-compatible storage bucket. To do that, + we'll set up an upload mirror to copy uploads to an S3-compatible service. 
+ + In order to continue, you need to have a bucket set up in S3 or a S3-compatible system such as Digital Ocean Spaces. + You also need credentials handy for a user that has read/write access to the bucket. + + TEXT + + say "NOTE: This tool does not support setting up GCS buckets. If you'd like to use GCS, please set it up manually.\n", :yellow + + if @storage.mirror_store.present? + if @storage.mirror_store_file? + say <<~TEXT, :red + It looks like you already have an upload mirror defined, but it refers to a local file store. In order to continue, + we'll need to detach this mirror and replace it with an S3-compatible cloud storage service. + + TEXT + + continue? + else + say <<~TEXT, :red + It looks like you already have an upload mirror defined, and it points to a cloud service. + + TEXT + + if no? <<~TEXT, :bold + Would you like to use this mirror as the primary store for your new Manifold instance? (y/n) + TEXT + continue? "Ok, we can set up a new one, but it will detach your current mirror. Would you like to continue? (y/n)" + else + return + end + end + end + + collect_s3_mirror_info + + say <<~TEXT + Okay, we're setting things up to start mirroring uploads to your bucket. Hold on... + + TEXT + + setup_s3_mirror_env + + maybe_restart_and_enqueue_mirror_job + end + + def maybe_restart_and_enqueue_mirror_job + say <<~TEXT, :yellow + We are now ready to start mirroring uploads. But first, we need to restart the Manifold API. + + Once this happens, all existing uploaded files will start to be copied to the mirror in a background thread. + One thread at a time will be used for this job. Manifold will continue to function, but background jobs may be slower to complete. + TEXT + + return if no? "Do you want to restart Manifold and start uploading now? Manifold will be briefly unavailable while it restarts. (y/n)", :bold + + say "Startin' the jerb" + + UploadMigrationJob.perform_later + + say "Restarting..." 
+ + puts `sudo service restart manifold_api` + end + + def collect_s3_mirror_info + say "Okay, let's collect that data from you." + + @bucket_name = ask("What's the name of the bucket you'll be migrating to?") + @region = ask("What's the region?") + @access_key_id = ask("What's the Access Key Id?") + @secret_access_key = ask("What's the Secret Access Key?") + @endpoint = ask("If you're using a service other than Amazon S3 (i.e. Digital Ocean Spaces), what's the endpoint URL?").presence + @force_path_style = yes?("Force path style URLs? (y/n)", :bold) + + say <<~TEXT + + Bucket Name: #{@bucket_name} + Region: #{@region} + Access Key ID: #{@access_key_id} + Secret Access Key: #{@secret_access_key} + Endpoint: #{@endpoint || "default"} + Force Path Style: #{@force_path_style.to_s} + + TEXT + + collect_s3_info if no? "Does this look correct? (y/n)", :bold + end + + def setup_s3_mirror_env + say_status "Processing", "Looking up environment file...", :yellow + env_file_location = ENV_FILE_OPTIONS.find { |fp| File.exist? fp } + + fatal!("Cannot find environment file") if env_file_location.blank? + + env_file = File.open(env_file_location, "w") + + say_status "Success", "Found environment file at #{env_file.path}." + + say_status "Processing", "Writing upload mirror variables...", :yellow + + env_file.write(s3_env_vars) + + say_status "Success", "Environment updated" + end + + def s3_env_vars + <<~TEXT + + MANIFOLD_SETTINGS_STORAGE_MIRROR='s3' + MANIFOLD_SETTINGS_STORAGE_MIRROR_BUCKET='#{@bucket_name}' + MANIFOLD_SETTINGS_STORAGE_MIRROR_PREFIX='#{@storage.primary_prefix}' + S3_ACCESS_KEY_ID='#{@access_key_id}' + S3_SECRET_ACCESS_KEY='#{@secret_access_key}' + S3_REGION='#{@region}' + S3_FORCE_PATH_STYLE='#{@force_path_style.to_s.upcase}' + #{@endpoint && "S3_ENDPOINT='#{@endpoint}'"} + TEXT + end + + def fatal!(message) + say_error "#{message}\n", :on_red + say_error "The above error is fatal. 
Exiting.", :red + exit 1 + end + + # Overrides + + # Default bold for asks + def ask(message, color = :bold) + super + end + + # Content methods + + def transition_env_vars + <<~TEXT + DOMAIN='#{@domain}' + MANIFOLD_SETTINGS_STORAGE_PRIMARY='s3' + MANIFOLD_SETTINGS_STORAGE_PRIMARY_BUCKET='#{@bucket_name}' + MANIFOLD_SETTINGS_STORAGE_CACHE_BUCKET='#{@storage.cache_bucket}' + MANIFOLD_SETTINGS_STORAGE_TUS_BUCKET='#{@storage.tus_bucket}' + MANIFOLD_SETTINGS_STORAGE_PRIMARY_PREFIX='#{@storage.primary_prefix}' + MANIFOLD_SETTINGS_STORAGE_CACHE_PREFIX='#{@storage.cache_prefix}' + MANIFOLD_SETTINGS_STORAGE_TUS_PREFIX='#{@storage.tus_prefix}' + S3_ACCESS_KEY_ID='#{@access_key_id}' + S3_SECRET_ACCESS_KEY='#{@secret_access_key}' + S3_REGION='#{@region}' + S3_FORCE_PATH_STYLE='#{@force_path_style.to_s.upcase}' + #{@endpoint && "S3_ENDPOINT='#{@endpoint}'"} + TEXT + end + + def process_outline + <<~TEXT + In order to seamless move from a bare metal installation to a Dockerized install, we need to do a few things: + + SETUP PHASE: + 1) Clone all uploaded assets to a S3-compatible storage bucket (unless they're already in one) + 2) Extract relevant configuration / environment variables + 3) Set up your new Dockerized hosting environment + + Then, when you're ready to make it happen: + + MIGRATION PHASE: + 4) Put this Manifold instance into maintenance/read-only mode + 5) Clone the database to your new hosting environment + 6) Change DNS records to point to the new host + + This tool can help with each step *except* #6. 
+ + TEXT + end + + def cool_manifold_logo + <<-LOGO + Manifold Scholarship + + ╦ + ░░░╬φ╦, ,╓╤ + ░░░╬░░░░░╬╦╦, ,╓╦φ╣╬░░░░ + ░░░ "╚╬░░░░╬▒╦╦, ,╓╦φ╬╬░░░░░╬╩╙" + ░░░ `╙╩╬░░░░╬φ╦╖ ,╓╦φ╣╬░░░░░╬╩╙" + ░░░ "╙╩░░░░░╬╦╦φ╣╬░░░░░╬╩╙" ,╔╦φ + ░░░ ,╓╦╬░░░░░░░░░░░░, ,╓╦φ╬╬░░░░░ + ░░░ ,╓╦φ▒╬░░░░░╬╩╨╠░░░╙╩╬░░░░╬▒╦╦╦φ╬╬░░░░░╩╩╙` + ░░░ ,╦φ╣╬░░░░░╬╩╨" ]░░░ "╙╩░░░░░░░░░ + ░░░╬░░░░░░╩╨" ,╠░░░ "╚╬░░░░░╬╦╦,╔╦ê╬ + ░░╬╩╩" ,╓╦φ╣╬░░░░░░ `╙╩╬░░░░░░░ + ,╓╦φ╣╬░░░░░╬╩╙"╠░░░ `║░░░ + ,╓╦φ╣╬░░░░░╬╩╙" ]░░░ ║░░░ + φ╣╬░░░░░╬╩╨" ,╔╣░░░ ║░░░ + ░╬╩╙` ,╓╦φ╬╬░░░░░░░ ║░░░ + ,╓╦φ╬╬░░░░░╩╩╙` ╠░░░ ║░░░ + ,╓╦φ╣╬░░░░░╩╩"` ]░░░ ║░░░ + ╣╬░░░░░╩╩"` ,╔╦╣░░░ ║░░░ + ╬╩╙` ,╔╦φ╬░░░░░░░░░ ║░░░ + ,╔╦φ╬╬░░░░░╩╩" ]░░░ ║░░░ + ,╔╦φ╣░░░░░░╩╩"` ]░░░ ║░░░ + ░░░░░░╩╩"` ,╦╦╣╬░░░╣╦╦, ║░░░ + ╩"` ,╦╦╣╬░░░░░╬╩╩╙╩╬░░░░╬▒╦╖ ║░░░ + ,,╦╦▒╬░░░░░╬╩╙" `╙╩╬░░░░╬φ╦╓ ║░░░ + ,╦╦ê╣░░░░░░╩╨" "╚╬░░░░░╣╦╦, ║░░░ + ░░░░░╩╩" `╙╩╬░░░░╬╬░░░ + " `╙╩╬░░░ + " + LOGO + end + +end + + + +# This is a long-running job that manages the entire asset migration process +# It will enqueue individual uploads at a pace set by {delay} +# By default, delay is set to process reasonably slow in order to avoid overwhelming the worker +class MirrorUploads + def self.dewit(attachables = nil) + @attachables = attachables || default_attachables + + @attachables.each do |model| + attachments = model.shrine_attachment_configurations.keys + + puts "Mirroring attachments for model #{model.to_s}" + + model.find_each do |record| + attachments.each do |attachment_name| + attacher = record.send("#{attachment_name}_attacher") + next unless attacher.stored? + + puts "Putting #{model.to_s} #{record.id} #{attachment_name}" + attacher.file.trigger_mirror_upload + + attacher.map_derivative(attacher.derivatives) do |_, derivative| + derivative.trigger_mirror_upload + end + end + end + end + + end + + def self.default_attachables + ApplicationRecord.descendants.select { |model| model.include? 
Attachments } + end + +end + +MirrorUploads.dewit From f73d6e71654cdac1cf7ab51c80e463b4b4b45068 Mon Sep 17 00:00:00 2001 From: Tim Standen Date: Thu, 16 Oct 2025 09:00:15 -0700 Subject: [PATCH 16/43] [B] Fix ingestion sources not rerouting properly --- api/app/controllers/api/proxy/ingestion_sources_controller.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/app/controllers/api/proxy/ingestion_sources_controller.rb b/api/app/controllers/api/proxy/ingestion_sources_controller.rb index 696bdecd11..6170c367b6 100644 --- a/api/app/controllers/api/proxy/ingestion_sources_controller.rb +++ b/api/app/controllers/api/proxy/ingestion_sources_controller.rb @@ -10,7 +10,7 @@ def show if source.attachment.storage.respond_to? :path send_attachment(source) else - redirect_to source.attachment.url + redirect_to source.attachment.url, allow_other_host: true end end From bb20c6df80a9d5dd8273fb8779bff5cc3bf5c1f1 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Thu, 30 Oct 2025 18:06:10 -0700 Subject: [PATCH 17/43] [C] Restore metadown --- api/Gemfile | 2 +- api/Gemfile.lock | 9 +++++++++ api/config/initializers/30_monkeypatches.rb | 2 +- api/db/structure.sql | 4 ++-- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/api/Gemfile b/api/Gemfile index 49fecd5714..84e96606c1 100644 --- a/api/Gemfile +++ b/api/Gemfile @@ -74,7 +74,7 @@ gem "lograge", "~> 0.10" gem "marcel", "~> 1.0.0" gem "maxminddb" gem "memoist", "~> 0.16.0" -# gem "metadown", git: "https://github.com/steveklabnik/metadown.git", tag: "v1.1.0.beta" +gem "metadown", git: "https://github.com/steveklabnik/metadown.git", tag: "v1.1.0.beta" gem "mime-types", "~> 3.3" gem "money", "~> 6.0" gem "namae", "~>1.0" diff --git a/api/Gemfile.lock b/api/Gemfile.lock index 738cd3cfaf..b57e2cf890 100644 --- a/api/Gemfile.lock +++ b/api/Gemfile.lock @@ -6,6 +6,14 @@ GIT fast_jsonapi (1.6.0) activesupport (>= 4.2) +GIT + remote: https://github.com/steveklabnik/metadown.git + revision: 
34b79ad7e0bdf560ce13e384caa1626569397798 + tag: v1.1.0.beta + specs: + metadown (1.1.0.beta) + redcarpet + GEM remote: https://rubygems.org/ specs: @@ -928,6 +936,7 @@ DEPENDENCIES marcel (~> 1.0.0) maxminddb memoist (~> 0.16.0) + metadown! mime-types (~> 3.3) money (~> 6.0) namae (~> 1.0) diff --git a/api/config/initializers/30_monkeypatches.rb b/api/config/initializers/30_monkeypatches.rb index 342fdd88c2..baa466a024 100644 --- a/api/config/initializers/30_monkeypatches.rb +++ b/api/config/initializers/30_monkeypatches.rb @@ -1,7 +1,7 @@ # frozen_string_literal: true require Rails.root.join("lib", "patches", "better_interactions").to_s -# require Rails.root.join("lib", "patches", "metadown_custom_renderer").to_s +require Rails.root.join("lib", "patches", "metadown_custom_renderer").to_s require Rails.root.join("lib", "patches", "better_enums").to_s require Rails.root.join("lib", "patches", "for_shrine").to_s require Rails.root.join("lib", "patches", "friendly_id_uniqueness").to_s diff --git a/api/db/structure.sql b/api/db/structure.sql index a660678a58..577663f2d9 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict mRJh1bEMqIKlORifWLRPjref0qy1FtwUYDQiKi0d19Z2JfhrL9GFesgbZeG4R02 +\restrict LgKdnkskBLdmQCPijahDvVGDIB1Zp5pO682FfA936h2a5tJZhZOaJoo4fgwOZEd -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -7733,7 +7733,7 @@ ALTER TABLE ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict mRJh1bEMqIKlORifWLRPjref0qy1FtwUYDQiKi0d19Z2JfhrL9GFesgbZeG4R02 +\unrestrict LgKdnkskBLdmQCPijahDvVGDIB1Zp5pO682FfA936h2a5tJZhZOaJoo4fgwOZEd SET search_path TO "$user", public; From 7bde2b496c51509d206a107c219e7d71ffa3176e Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Thu, 30 Oct 2025 18:06:26 -0700 Subject: [PATCH 18/43] [B] Fix secret key base lookup - Moved from secrets to env --- api/config/initializers/25_lockbox.rb | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/api/config/initializers/25_lockbox.rb b/api/config/initializers/25_lockbox.rb index 2e409c1ce0..e6b862ef4c 100644 --- a/api/config/initializers/25_lockbox.rb +++ b/api/config/initializers/25_lockbox.rb @@ -1,6 +1,6 @@ # frozen_string_literal: true -secret_key_base = Rails.application.secrets.secret_key_base +secret_key_base = Rails.application.secret_key_base secret_key_base = secret_key_base.presence || Lockbox.generate_key if Rails.env.test? From bb20a5c72213f1ec24be10c915cbff8a3d4f53b8 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 5 Nov 2025 16:17:25 -0800 Subject: [PATCH 19/43] [F] Add health check controller and route --- api/app/controllers/health_controller.rb | 5 +++++ api/config/routes.rb | 2 ++ api/db/structure.sql | 4 ++-- 3 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 api/app/controllers/health_controller.rb diff --git a/api/app/controllers/health_controller.rb b/api/app/controllers/health_controller.rb new file mode 100644 index 0000000000..4202a9ae3e --- /dev/null +++ b/api/app/controllers/health_controller.rb @@ -0,0 +1,5 @@ +class HealthController < ApplicationController + def show + render json: { ok: true } + end +end diff --git a/api/config/routes.rb b/api/config/routes.rb index ab93495378..f98d80e51a 100644 --- a/api/config/routes.rb +++ b/api/config/routes.rb @@ -19,6 +19,8 @@ mount GoodJob::Engine => '/api/good_job' end + get "up" => "health#show", as: :health_check + get "auth/:provider/callback", to: "oauth#authorize" namespace :api do diff --git a/api/db/structure.sql b/api/db/structure.sql index 577663f2d9..51b445c80a 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict LgKdnkskBLdmQCPijahDvVGDIB1Zp5pO682FfA936h2a5tJZhZOaJoo4fgwOZEd +\restrict 9Bc9fJ2PHsc7n02xAZJDmgvpzjVsT9eHSTymQRDwssO2JqdupTVaZyvIEQPRG3V -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -7733,7 +7733,7 @@ ALTER TABLE 
ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict LgKdnkskBLdmQCPijahDvVGDIB1Zp5pO682FfA936h2a5tJZhZOaJoo4fgwOZEd +\unrestrict 9Bc9fJ2PHsc7n02xAZJDmgvpzjVsT9eHSTymQRDwssO2JqdupTVaZyvIEQPRG3V SET search_path TO "$user", public; From d2ba28a4bdfebc4e68772240047b9dc3ef507b2d Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 5 Nov 2025 16:18:19 -0800 Subject: [PATCH 20/43] [B] Fix text section search index job failing on new app migration - Migration enqueued a job, but runs before good_jobs table is created --- ...180248_add_search_indexed_to_text_section_nodes.rb | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/api/db/migrate/20250527180248_add_search_indexed_to_text_section_nodes.rb b/api/db/migrate/20250527180248_add_search_indexed_to_text_section_nodes.rb index b1f1c64743..8c5a61c824 100644 --- a/api/db/migrate/20250527180248_add_search_indexed_to_text_section_nodes.rb +++ b/api/db/migrate/20250527180248_add_search_indexed_to_text_section_nodes.rb @@ -13,10 +13,13 @@ def change reversible do |dir| dir.up do if !Rails.env.test? 
&& defined?(::TextSectionNodes::BackportSearchIndexJob) - begin - ::TextSectionNodes::BackportSearchIndexJob.set(wait: 10.minutes).perform_later - rescue StandardError - # Intentionally left blank + # If this is our first migration, good_jobs won't exist yet, but the db is empty anyway + if connection.table_exists?("good_jobs") + begin + ::TextSectionNodes::BackportSearchIndexJob.set(wait: 10.minutes).perform_later + rescue StandardError + # Intentionally left blank + end end end end From d8c2a82c7f0eea3f353d54549e9ba8016b2557dd Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Tue, 6 Jan 2026 13:02:42 -0800 Subject: [PATCH 21/43] [F] Log exceptions when rendering 500 API response --- api/app/controllers/application_controller.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/api/app/controllers/application_controller.rb b/api/app/controllers/application_controller.rb index 602aae8c5b..1da4b48649 100644 --- a/api/app/controllers/application_controller.rb +++ b/api/app/controllers/application_controller.rb @@ -167,6 +167,7 @@ def authority_forbidden(error) def render_error_response(error) # :nocov: raise error if Rails.env.test? 
+ Rails.logger.error(["#{error.class.name} - #{error.message}", *error.backtrace].join("\n")) options = { status: 500, From 6af1a2b6fee29ad203755279fca9a3cfd7547c75 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Tue, 6 Jan 2026 13:03:00 -0800 Subject: [PATCH 22/43] [B] Fix JWT generation without secrets file --- api/lib/auth_token.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/lib/auth_token.rb b/api/lib/auth_token.rb index 87fa309fd1..0bf93ad1c0 100644 --- a/api/lib/auth_token.rb +++ b/api/lib/auth_token.rb @@ -10,7 +10,7 @@ class << self def encode(payload, ttl_in_minutes = DEFAULT_TTL) payload[:exp] = ttl_in_minutes.minutes.from_now.to_i - JWT.encode(payload, Rails.application.secrets.secret_key_base) + JWT.encode(payload, Rails.application.secret_key_base) end # @param [User] user @@ -26,7 +26,7 @@ def encode_user(user) # Decode a token and return the payload inside # If will throw an error if expired or invalid. See the docs for the JWT gem. def decode(token, leeway = nil) - payload, = JWT.decode(token, Rails.application.secrets.secret_key_base, leeway: leeway) + payload, = JWT.decode(token, Rails.application.secret_key_base, leeway: leeway) payload.with_indifferent_access end From 048b6358d3cc09a0e742134b1b19d1beb2e7860b Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Tue, 6 Jan 2026 13:03:36 -0800 Subject: [PATCH 23/43] [E] Remove asset host defaulting to S3 endpoint --- api/config/environments/development.rb | 1 + api/db/structure.sql | 391 +++++++++++++++++++------ api/lib/storage/factory.rb | 2 +- docker/local.env | 4 +- docker/manifold.env | 4 +- 5 files changed, 315 insertions(+), 87 deletions(-) diff --git a/api/config/environments/development.rb b/api/config/environments/development.rb index a8d3d99ac8..c5bcada7d1 100644 --- a/api/config/environments/development.rb +++ b/api/config/environments/development.rb @@ -32,6 +32,7 @@ config.hosts << "www.example.com" config.hosts << "localhost" config.hosts << "manifold.lvh" + 
config.hosts << "web.manifold.orb.local" config.hosts << "manifold-api.ngrok.io" config.hosts << "manifold-dev.ngrok.io" config.hosts << ENV["DOMAIN"] diff --git a/api/db/structure.sql b/api/db/structure.sql index 51b445c80a..27707b6730 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict 9Bc9fJ2PHsc7n02xAZJDmgvpzjVsT9eHSTymQRDwssO2JqdupTVaZyvIEQPRG3V +\restrict aFbNPfJ9QJhNrDvHxcEauL46R8ZoAcf6sxZdkQYngf6yB0PX4fAzrMSUaWBNy8V -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -402,7 +402,7 @@ CREATE TABLE public.action_callouts ( -- CREATE TABLE public.analytics_events ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, visit_id uuid NOT NULL, name character varying NOT NULL, properties jsonb DEFAULT '{}'::jsonb NOT NULL, @@ -415,7 +415,7 @@ CREATE TABLE public.analytics_events ( -- CREATE TABLE public.analytics_visits ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, visit_token character varying NOT NULL, visitor_token character varying NOT NULL, ip character varying, @@ -473,7 +473,8 @@ CREATE TABLE public.annotations ( marked_for_purge_at timestamp without time zone, resolved_flags_count bigint DEFAULT 0 NOT NULL, unresolved_flags_count bigint DEFAULT 0 NOT NULL, - flagger_ids uuid[] DEFAULT '{}'::uuid[] NOT NULL + flagger_ids uuid[] DEFAULT '{}'::uuid[] NOT NULL, + reader_display_format text ); @@ -508,7 +509,7 @@ CREATE TABLE public.comments ( -- CREATE TABLE public.reading_group_memberships ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, user_id uuid, reading_group_id uuid, created_at timestamp without time zone NOT NULL, @@ -651,8 +652,8 @@ CREATE VIEW public.annotation_reading_group_memberships AS CREATE TABLE public.ar_internal_metadata ( key character varying NOT NULL, value character varying, - created_at timestamp 
without time zone NOT NULL, - updated_at timestamp without time zone NOT NULL + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL ); @@ -661,7 +662,7 @@ CREATE TABLE public.ar_internal_metadata ( -- CREATE TABLE public.cached_external_source_links ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, cached_external_source_id uuid NOT NULL, text_id uuid NOT NULL, created_at timestamp without time zone NOT NULL, @@ -674,7 +675,7 @@ CREATE TABLE public.cached_external_source_links ( -- CREATE TABLE public.cached_external_sources ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, url text NOT NULL, source_identifier text NOT NULL, kind text DEFAULT 'unknown'::text NOT NULL, @@ -865,7 +866,9 @@ CREATE TABLE public.projects ( marked_for_purge_at timestamp without time zone, social_image_data jsonb, social_description text, - social_title text + social_title text, + orphaned_journal_issue_id uuid, + orphaned_journal_issue boolean DEFAULT false NOT NULL ); @@ -984,7 +987,7 @@ CREATE TABLE public.content_blocks ( -- CREATE TABLE public.entitlement_roles ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, name text NOT NULL, kind text DEFAULT 'unknown'::text NOT NULL, created_at timestamp without time zone NOT NULL, @@ -1054,7 +1057,7 @@ CREATE TABLE public.entitlement_transitions ( -- CREATE TABLE public.entitlement_user_links ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, entitlement_id uuid NOT NULL, user_id uuid NOT NULL, created_at timestamp without time zone NOT NULL, @@ -1067,7 +1070,7 @@ CREATE TABLE public.entitlement_user_links ( -- CREATE TABLE public.entitlements ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, target_type character varying NOT NULL, target_id 
uuid NOT NULL, entitler_id uuid NOT NULL, @@ -1091,9 +1094,9 @@ CREATE TABLE public.entitlements ( -- CREATE TABLE public.journal_issues ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, journal_id uuid NOT NULL, journal_volume_id uuid, creator_id uuid, @@ -1109,9 +1112,9 @@ CREATE TABLE public.journal_issues ( -- CREATE TABLE public.journal_volumes ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, journal_id uuid NOT NULL, creator_id uuid, number integer, @@ -1282,7 +1285,7 @@ CREATE MATERIALIZED VIEW public.entitlement_grant_audits AS -- CREATE TABLE public.entitlement_import_row_transitions ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, to_state character varying NOT NULL, metadata jsonb DEFAULT '{}'::jsonb, sort_key integer NOT NULL, @@ -1298,7 +1301,7 @@ CREATE TABLE public.entitlement_import_row_transitions ( -- CREATE TABLE public.entitlement_import_rows ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, entitlement_import_id uuid NOT NULL, entitlement_id uuid, subject_type character varying, @@ -1309,8 +1312,8 @@ CREATE TABLE public.entitlement_import_rows ( email public.citext, expires_on date, messages text[] DEFAULT '{}'::text[] NOT NULL, - created_at timestamp(6) without 
time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL, + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, pending_entitlement_id uuid, expiration text, first_name text, @@ -1323,7 +1326,7 @@ CREATE TABLE public.entitlement_import_rows ( -- CREATE TABLE public.entitlement_import_transitions ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, to_state character varying NOT NULL, metadata jsonb DEFAULT '{}'::jsonb, sort_key integer NOT NULL, @@ -1339,14 +1342,14 @@ CREATE TABLE public.entitlement_import_transitions ( -- CREATE TABLE public.entitlement_imports ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, creator_id uuid, name public.citext NOT NULL, file_data jsonb, entitlement_import_rows_count bigint DEFAULT 0 NOT NULL, messages text[] DEFAULT '{}'::text[] NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -1355,7 +1358,7 @@ CREATE TABLE public.entitlement_imports ( -- CREATE TABLE public.reading_groups ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, name character varying, privacy character varying DEFAULT 'private'::character varying, invitation_code character varying, @@ -1463,7 +1466,7 @@ ALTER SEQUENCE public.entitlement_transitions_id_seq OWNED BY public.entitlement -- CREATE TABLE public.entitlers ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, entity_type character varying NOT NULL, entity_id uuid NOT NULL, name text NOT NULL, @@ -1506,7 +1509,7 
@@ CREATE TABLE public.events ( -- CREATE TABLE public.export_targets ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, strategy text DEFAULT 'unknown'::text NOT NULL, name text NOT NULL, slug text NOT NULL, @@ -1516,12 +1519,26 @@ CREATE TABLE public.export_targets ( ); +-- +-- Name: external_identifiers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.external_identifiers ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + identifier character varying NOT NULL, + identifiable_type character varying, + identifiable_id uuid, + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL +); + + -- -- Name: user_collected_composite_entries; Type: TABLE; Schema: public; Owner: - -- CREATE TABLE public.user_collected_composite_entries ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, user_id uuid NOT NULL, collectable_type character varying NOT NULL, collectable_id uuid NOT NULL, @@ -1665,6 +1682,7 @@ CREATE TABLE public.friendly_id_slugs ( -- CREATE SEQUENCE public.friendly_id_slugs_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -1902,8 +1920,8 @@ CREATE TABLE public.ingestions ( CREATE TABLE public.journal_subjects ( id bigint NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL, + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, journal_id uuid NOT NULL, subject_id uuid NOT NULL ); @@ -1933,9 +1951,9 @@ ALTER SEQUENCE public.journal_subjects_id_seq OWNED BY public.journal_subjects.i -- CREATE TABLE public.journals ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() 
NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, title character varying, subtitle character varying, description text, @@ -2049,6 +2067,7 @@ CREATE TABLE public.pages ( -- CREATE SEQUENCE public.pages_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2068,14 +2087,14 @@ ALTER SEQUENCE public.pages_id_seq OWNED BY public.pages.id; -- CREATE TABLE public.pending_entitlement_transitions ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, pending_entitlement_id uuid NOT NULL, most_recent boolean NOT NULL, sort_key integer NOT NULL, to_state character varying NOT NULL, metadata jsonb, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -2084,7 +2103,7 @@ CREATE TABLE public.pending_entitlement_transitions ( -- CREATE TABLE public.pending_entitlements ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, creator_id uuid, entitlement_id uuid, user_id uuid, @@ -2097,8 +2116,8 @@ CREATE TABLE public.pending_entitlements ( first_name text, last_name text, messages text[] DEFAULT '{}'::text[] NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -2143,7 +2162,9 @@ CREATE TABLE public.pg_search_documents ( metadata jsonb, created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, updated_at 
timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - tsv_composite tsvector GENERATED ALWAYS AS ((((((((public.to_unaccented_weighted_tsv(title, 'A'::"char") || public.to_unaccented_weighted_tsv(primary_data, 'A'::"char")) || public.to_unaccented_weighted_tsv(secondary, 'B'::"char")) || public.to_unaccented_weighted_tsv(secondary_data, 'B'::"char")) || public.to_unaccented_weighted_tsv(tertiary, 'C'::"char")) || public.to_unaccented_weighted_tsv(tertiary_data, 'C'::"char")) || public.to_unaccented_weighted_tsv(content, 'D'::"char")) || public.to_unaccented_weighted_tsv(metadata, 'D'::"char"))) STORED NOT NULL + tsv_composite tsvector GENERATED ALWAYS AS ((((((((public.to_unaccented_weighted_tsv(title, 'A'::"char") || public.to_unaccented_weighted_tsv(primary_data, 'A'::"char")) || public.to_unaccented_weighted_tsv(secondary, 'B'::"char")) || public.to_unaccented_weighted_tsv(secondary_data, 'B'::"char")) || public.to_unaccented_weighted_tsv(tertiary, 'C'::"char")) || public.to_unaccented_weighted_tsv(tertiary_data, 'C'::"char")) || public.to_unaccented_weighted_tsv(content, 'D'::"char")) || public.to_unaccented_weighted_tsv(metadata, 'D'::"char"))) STORED NOT NULL, + journal_issue_id uuid, + journal_content boolean DEFAULT false NOT NULL ); @@ -2165,6 +2186,7 @@ CREATE TABLE public.project_collection_subjects ( -- CREATE SEQUENCE public.project_collection_subjects_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2184,7 +2206,7 @@ ALTER SEQUENCE public.project_collection_subjects_id_seq OWNED BY public.project -- CREATE TABLE public.project_exports ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, project_id uuid NOT NULL, export_kind text DEFAULT 'unknown'::text NOT NULL, fingerprint text NOT NULL, @@ -2220,7 +2242,7 @@ CREATE VIEW public.project_export_statuses AS -- CREATE TABLE public.project_exportation_transitions ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid 
DEFAULT gen_random_uuid() NOT NULL, to_state text NOT NULL, metadata jsonb DEFAULT '{}'::jsonb, sort_key integer NOT NULL, @@ -2236,7 +2258,7 @@ CREATE TABLE public.project_exportation_transitions ( -- CREATE TABLE public.project_exportations ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, project_id uuid NOT NULL, export_target_id uuid NOT NULL, project_export_id uuid, @@ -2264,6 +2286,7 @@ CREATE TABLE public.project_subjects ( -- CREATE SEQUENCE public.project_subjects_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2313,7 +2336,7 @@ CREATE VIEW public.project_summaries AS -- CREATE TABLE public.reading_group_categories ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, "position" integer, title text NOT NULL, @@ -2331,7 +2354,7 @@ CREATE TABLE public.reading_group_categories ( -- CREATE TABLE public.reading_group_composite_entries ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, collectable_type character varying NOT NULL, collectable_id uuid NOT NULL, @@ -2392,7 +2415,7 @@ CREATE VIEW public.reading_group_collections AS -- CREATE TABLE public.reading_group_projects ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, project_id uuid NOT NULL, reading_group_category_id uuid, @@ -2407,7 +2430,7 @@ CREATE TABLE public.reading_group_projects ( -- CREATE TABLE public.reading_group_resource_collections ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, resource_collection_id uuid NOT NULL, reading_group_category_id uuid, @@ -2422,7 +2445,7 @@ CREATE TABLE public.reading_group_resource_collections ( -- CREATE TABLE public.reading_group_resources ( - id uuid DEFAULT 
public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, resource_id uuid NOT NULL, reading_group_category_id uuid, @@ -2437,7 +2460,7 @@ CREATE TABLE public.reading_group_resources ( -- CREATE TABLE public.reading_group_texts ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, text_id uuid NOT NULL, reading_group_category_id uuid, @@ -2493,13 +2516,13 @@ CREATE VIEW public.reading_group_counts AS -- CREATE TABLE public.reading_group_journal_issues ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, journal_issue_id uuid NOT NULL, reading_group_category_id uuid, "position" integer, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -2508,11 +2531,11 @@ CREATE TABLE public.reading_group_journal_issues ( -- CREATE TABLE public.reading_group_kinds ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, name text NOT NULL, slug text NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -2539,13 +2562,13 @@ CREATE VIEW public.reading_group_membership_counts AS -- CREATE TABLE public.reading_group_text_sections ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, reading_group_id uuid NOT NULL, text_section_id uuid NOT NULL, reading_group_category_id uuid, "position" 
integer, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -2633,6 +2656,7 @@ CREATE TABLE public.resource_import_row_transitions ( -- CREATE SEQUENCE public.resource_import_row_transitions_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2686,6 +2710,7 @@ CREATE TABLE public.resource_import_transitions ( -- CREATE SEQUENCE public.resource_import_transitions_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2854,6 +2879,7 @@ CREATE TABLE public.settings ( -- CREATE SEQUENCE public.settings_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2907,7 +2933,7 @@ CREATE TABLE public.subjects ( -- CREATE TABLE public.system_entitlements ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, kind text NOT NULL, created_at timestamp without time zone NOT NULL, updated_at timestamp without time zone NOT NULL @@ -2935,6 +2961,7 @@ CREATE TABLE public.taggings ( -- CREATE SEQUENCE public.taggings_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2965,6 +2992,7 @@ CREATE TABLE public.tags ( -- CREATE SEQUENCE public.tags_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -2984,7 +3012,7 @@ ALTER SEQUENCE public.tags_id_seq OWNED BY public.tags.id; -- CREATE TABLE public.text_exports ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, text_id uuid NOT NULL, export_kind text DEFAULT 'unknown'::text NOT NULL, fingerprint text NOT NULL, @@ -3103,6 +3131,7 @@ CREATE TABLE public.text_section_stylesheets ( -- CREATE SEQUENCE public.text_section_stylesheets_id_seq + AS integer START WITH 1 INCREMENT BY 1 NO MINVALUE @@ -3297,11 +3326,11 @@ CREATE TABLE public.upgrade_results ( -- 
CREATE TABLE public.user_collected_journal_issues ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, user_id uuid NOT NULL, journal_issue_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -3310,11 +3339,11 @@ CREATE TABLE public.user_collected_journal_issues ( -- CREATE TABLE public.user_collected_projects ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, user_id uuid NOT NULL, project_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -3323,11 +3352,11 @@ CREATE TABLE public.user_collected_projects ( -- CREATE TABLE public.user_collected_resource_collections ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, user_id uuid NOT NULL, resource_collection_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -3336,11 +3365,11 @@ CREATE TABLE public.user_collected_resource_collections ( -- CREATE TABLE public.user_collected_resources ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, user_id uuid NOT NULL, resource_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() 
NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -3349,11 +3378,11 @@ CREATE TABLE public.user_collected_resources ( -- CREATE TABLE public.user_collected_text_sections ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, user_id uuid NOT NULL, text_section_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -3362,11 +3391,11 @@ CREATE TABLE public.user_collected_text_sections ( -- CREATE TABLE public.user_collected_texts ( - id uuid DEFAULT public.gen_random_uuid() NOT NULL, + id uuid DEFAULT gen_random_uuid() NOT NULL, user_id uuid NOT NULL, text_id uuid NOT NULL, - created_at timestamp(6) without time zone DEFAULT now() NOT NULL, - updated_at timestamp(6) without time zone DEFAULT now() NOT NULL + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL ); @@ -3435,6 +3464,47 @@ CREATE VIEW public.user_derived_roles AS GROUP BY u.id; +-- +-- Name: user_group_entitleables; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.user_group_entitleables ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + user_group_id uuid NOT NULL, + entitleable_type character varying NOT NULL, + entitleable_id uuid NOT NULL, + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: user_group_memberships; Type: TABLE; Schema: public; Owner: - +-- + +CREATE 
TABLE public.user_group_memberships ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + user_id uuid NOT NULL, + user_group_id uuid NOT NULL, + source_type character varying, + source_id uuid, + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: user_groups; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.user_groups ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + name text NOT NULL, + created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + -- -- Name: version_associations; Type: TABLE; Schema: public; Owner: - -- @@ -3778,6 +3848,14 @@ ALTER TABLE ONLY public.export_targets ADD CONSTRAINT export_targets_pkey PRIMARY KEY (id); +-- +-- Name: external_identifiers external_identifiers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.external_identifiers + ADD CONSTRAINT external_identifiers_pkey PRIMARY KEY (id); + + -- -- Name: features features_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- @@ -4386,6 +4464,30 @@ ALTER TABLE ONLY public.user_collected_texts ADD CONSTRAINT user_collected_texts_pkey PRIMARY KEY (id); +-- +-- Name: user_group_entitleables user_group_entitleables_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.user_group_entitleables + ADD CONSTRAINT user_group_entitleables_pkey PRIMARY KEY (id); + + +-- +-- Name: user_group_memberships user_group_memberships_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.user_group_memberships + ADD CONSTRAINT user_group_memberships_pkey PRIMARY KEY (id); + + +-- +-- Name: user_groups user_groups_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.user_groups + ADD CONSTRAINT user_groups_pkey PRIMARY KEY (id); + + -- -- Name: users users_pkey; 
Type: CONSTRAINT; Schema: public; Owner: - -- @@ -5012,6 +5114,20 @@ CREATE UNIQUE INDEX index_export_targets_on_slug ON public.export_targets USING CREATE INDEX index_export_targets_on_strategy ON public.export_targets USING btree (strategy); +-- +-- Name: index_external_identifiers_on_identifiable; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_external_identifiers_on_identifiable ON public.external_identifiers USING btree (identifiable_type, identifiable_id); + + +-- +-- Name: index_external_identifiers_on_identifier; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_external_identifiers_on_identifier ON public.external_identifiers USING btree (identifier); + + -- -- Name: index_flags_on_flaggable_type_and_flaggable_id; Type: INDEX; Schema: public; Owner: - -- @@ -5481,6 +5597,13 @@ CREATE INDEX index_pending_entitlements_on_user_id ON public.pending_entitlement CREATE INDEX index_pg_search_documents_on_journal_id ON public.pg_search_documents USING btree (journal_id); +-- +-- Name: index_pg_search_documents_on_journal_issue_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_pg_search_documents_on_journal_issue_id ON public.pg_search_documents USING btree (journal_issue_id); + + -- -- Name: index_pg_search_documents_on_project_id; Type: INDEX; Schema: public; Owner: - -- @@ -6762,6 +6885,62 @@ CREATE INDEX index_user_collected_text_sections_on_user_id ON public.user_collec CREATE INDEX index_user_collected_texts_on_user_id ON public.user_collected_texts USING btree (user_id); +-- +-- Name: index_user_group_entitleables_on_entitleable; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_user_group_entitleables_on_entitleable ON public.user_group_entitleables USING btree (entitleable_type, entitleable_id); + + +-- +-- Name: index_user_group_entitleables_on_user_group_and_entitleable; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX 
index_user_group_entitleables_on_user_group_and_entitleable ON public.user_group_entitleables USING btree (user_group_id, entitleable_type, entitleable_id); + + +-- +-- Name: index_user_group_entitleables_on_user_group_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_user_group_entitleables_on_user_group_id ON public.user_group_entitleables USING btree (user_group_id); + + +-- +-- Name: index_user_group_memberships_on_source; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_user_group_memberships_on_source ON public.user_group_memberships USING btree (source_type, source_id); + + +-- +-- Name: index_user_group_memberships_on_user_group_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_user_group_memberships_on_user_group_id ON public.user_group_memberships USING btree (user_group_id); + + +-- +-- Name: index_user_group_memberships_on_user_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_user_group_memberships_on_user_id ON public.user_group_memberships USING btree (user_id); + + +-- +-- Name: index_user_group_memberships_on_user_id_and_user_group_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_user_group_memberships_on_user_id_and_user_group_id ON public.user_group_memberships USING btree (user_id, user_group_id); + + +-- +-- Name: index_user_groups_on_name; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_user_groups_on_name ON public.user_groups USING btree (name); + + -- -- Name: index_users_on_deleted_at; Type: INDEX; Schema: public; Owner: - -- @@ -7001,6 +7180,22 @@ ALTER TABLE ONLY public.user_collected_texts ADD CONSTRAINT fk_rails_127b46870c FOREIGN KEY (text_id) REFERENCES public.texts(id) ON DELETE CASCADE; +-- +-- Name: journal_issues fk_rails_159f2e66d4; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.journal_issues + ADD CONSTRAINT fk_rails_159f2e66d4 FOREIGN KEY (journal_id) REFERENCES 
public.journals(id) ON DELETE RESTRICT; + + +-- +-- Name: journal_issues fk_rails_15a20a3530; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.journal_issues + ADD CONSTRAINT fk_rails_15a20a3530 FOREIGN KEY (journal_volume_id) REFERENCES public.journal_volumes(id) ON DELETE RESTRICT; + + -- -- Name: entitlement_import_transitions fk_rails_19acd61494; Type: FK CONSTRAINT; Schema: public; Owner: - -- @@ -7057,6 +7252,14 @@ ALTER TABLE ONLY public.pending_entitlement_transitions ADD CONSTRAINT fk_rails_292c17a15e FOREIGN KEY (pending_entitlement_id) REFERENCES public.pending_entitlements(id) ON DELETE CASCADE; +-- +-- Name: projects fk_rails_2a006842be; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.projects + ADD CONSTRAINT fk_rails_2a006842be FOREIGN KEY (journal_issue_id) REFERENCES public.journal_issues(id) ON DELETE RESTRICT; + + -- -- Name: reading_group_composite_entries fk_rails_313af69a44; Type: FK CONSTRAINT; Schema: public; Owner: - -- @@ -7465,6 +7668,14 @@ ALTER TABLE ONLY public.reading_group_projects ADD CONSTRAINT fk_rails_af4c0905cb FOREIGN KEY (reading_group_id) REFERENCES public.reading_groups(id) ON DELETE CASCADE; +-- +-- Name: pg_search_documents fk_rails_b02f365b4d; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.pg_search_documents + ADD CONSTRAINT fk_rails_b02f365b4d FOREIGN KEY (journal_issue_id) REFERENCES public.journal_issues(id) ON DELETE SET NULL; + + -- -- Name: import_selection_matches fk_rails_b3b5d1b78b; Type: FK CONSTRAINT; Schema: public; Owner: - -- @@ -7625,6 +7836,14 @@ ALTER TABLE ONLY public.user_collected_composite_entries ADD CONSTRAINT fk_rails_e03a5be0da FOREIGN KEY (project_id) REFERENCES public.projects(id) ON DELETE CASCADE; +-- +-- Name: journal_volumes fk_rails_e11de3191d; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.journal_volumes + ADD CONSTRAINT fk_rails_e11de3191d FOREIGN KEY (journal_id) 
REFERENCES public.journals(id) ON DELETE RESTRICT; + + -- -- Name: user_collected_text_sections fk_rails_e3bf44e760; Type: FK CONSTRAINT; Schema: public; Owner: - -- @@ -7733,7 +7952,7 @@ ALTER TABLE ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict 9Bc9fJ2PHsc7n02xAZJDmgvpzjVsT9eHSTymQRDwssO2JqdupTVaZyvIEQPRG3V +\unrestrict aFbNPfJ9QJhNrDvHxcEauL46R8ZoAcf6sxZdkQYngf6yB0PX4fAzrMSUaWBNy8V SET search_path TO "$user", public; @@ -8092,14 +8311,20 @@ INSERT INTO "schema_migrations" (version) VALUES ('20250603192547'), ('20250609191642'), ('20250609192241'), +('20250723210143'), ('20251016204352'), ('20251017174417'), ('20251017211501'), ('20251020225421'), +('20251022183946'), ('20251103175506'), ('20251103175949'), ('20251103180007'), ('20251105165521'), -('20251121202033'); +('20251117204731'), +('20251120233556'), +('20251121202033'), +('20251203230443'), +('20251203231940'); diff --git a/api/lib/storage/factory.rb b/api/lib/storage/factory.rb index ce8e18b346..a78ef5b6ee 100644 --- a/api/lib/storage/factory.rb +++ b/api/lib/storage/factory.rb @@ -219,7 +219,7 @@ def asset_host if primary_store.file? 
Rails.configuration.manifold.api_url&.sub(%r{/\z}, "") || "" else - UploadConfig.asset_host || S3Config.endpoint + UploadConfig.asset_host end end end diff --git a/docker/local.env b/docker/local.env index cffafee94e..df4930d926 100644 --- a/docker/local.env +++ b/docker/local.env @@ -11,9 +11,9 @@ API_CABLE_PORT=13120 CLIENT_URL=http://localhost:13100 -CLIENT_BROWSER_API_URL=http://localhost:13110 +CLIENT_BROWSER_API_URL=https://web.manifold.orb.local CLIENT_BROWSER_API_CABLE_URL=http://localhost:13120 -CLIENT_SERVER_API_URL=http://web:13110 +CLIENT_SERVER_API_URL=https://web.manifold.orb.local CLIENT_SERVER_PROXIES=true diff --git a/docker/manifold.env b/docker/manifold.env index 336dc63aff..7fbe2fca0a 100644 --- a/docker/manifold.env +++ b/docker/manifold.env @@ -39,4 +39,6 @@ PGDATABASE=manifold_development S3_ENDPOINT=http://minio:9000 S3_ACCESS_KEY_ID=minio S3_SECRET_ACCESS_KEY=minio123 -UPLOAD_BUCKET=manifold-storage \ No newline at end of file +UPLOAD_BUCKET=manifold-storage + +RAILS_LOG_TO_STDOUT=TRUE \ No newline at end of file From 5dfa940185f7c405b08f9f79f7b20aa6809cd103 Mon Sep 17 00:00:00 2001 From: Zach Davis Date: Wed, 7 Jan 2026 16:05:18 -0800 Subject: [PATCH 24/43] [B] Fix http-proxy-middleware by adding changeOrigin option The proxy was failing with 403 errors when proxying to HTTPS targets because the Host header wasn't being rewritten. Added changeOrigin: true to fix this. Also added optional debug logging via PROXY_DEBUG env var. 
--- client/src/servers/common/ProxyHelper.js | 45 +++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/client/src/servers/common/ProxyHelper.js b/client/src/servers/common/ProxyHelper.js index cc2c798f37..8f638148d6 100644 --- a/client/src/servers/common/ProxyHelper.js +++ b/client/src/servers/common/ProxyHelper.js @@ -5,6 +5,8 @@ import isRegExp from "lodash/isRegExp"; import serveStatic from "serve-static"; import path from "path"; +const proxyDebug = process.env.PROXY_DEBUG === "true"; + class ProxyHelper { constructor(name) { this.name = name; @@ -13,6 +15,41 @@ class ProxyHelper { this.wwwTarget = path.join(__dirname, "..", "www"); } + proxyOptions(proxyPath, target, logLevel) { + const options = { + target, + logLevel: proxyDebug ? logLevel : "silent", + changeOrigin: true, + onError: (err, req, res) => { + ch.error( + `[Proxy Error] ${this.name} | ${proxyPath} -> ${target} | ${req.method} ${req.url}` + ); + ch.error(`[Proxy Error] ${err.message}`); + ch.error(err.stack); + } + }; + + if (proxyDebug) { + options.onProxyReq = (proxyReq, req) => { + const clientIp = + req.headers["x-forwarded-for"] || req.connection.remoteAddress; + ch.info( + `[Proxy Req] ${this.name} | ${req.method} ${req.url} -> ${target}${req.url} | IP: ${clientIp}` + ); + }; + options.onProxyRes = (proxyRes, req) => { + ch.info( + `[Proxy Res] ${this.name} | ${req.method} ${req.url} | Status: ${proxyRes.statusCode}` + ); + if (proxyRes.headers.location) { + ch.info(`[Proxy Res] Redirect Location: ${proxyRes.headers.location}`); + } + }; + } + + return options; + } + proxyAPIPaths(app) { this.defineProxy(app, "/system", this.apiAssetTarget); this.defineProxy(app, "/api/proxy", this.apiAssetTarget); @@ -49,17 +86,17 @@ class ProxyHelper { app.use(proxyPath, serveStatic(target, serveStaticOptions)); } - defineProxy(app, proxyPath, target, logLevel = "silent") { + defineProxy(app, proxyPath, target, logLevel = "debug") { if (isRegExp(proxyPath)) return 
this.defineRegExpProxy(app, proxyPath, target, logLevel); ch.background( `${this.name} server will proxy ${proxyPath} requests to ${target}.` ); - app.use(proxyPath, proxy({ target, logLevel })); + app.use(proxyPath, proxy(this.proxyOptions(proxyPath, target, logLevel))); } - defineRegExpProxy(app, proxyPath, target, logLevel = "silent") { - const theProxy = proxy({ target, logLevel }); + defineRegExpProxy(app, proxyPath, target, logLevel = "debug") { + const theProxy = proxy(this.proxyOptions(proxyPath, target, logLevel)); ch.background( `${ this.name From 1e04d141db85be23351c2b85afbe5589a8e6c7dc Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 7 Jan 2026 16:19:40 -0800 Subject: [PATCH 25/43] [C] Lint --- api/app/controllers/health_controller.rb | 2 + api/config/initializers/rack_attack.rb | 2 +- api/lib/tasks/manifold_migrator.thor | 340 ----------------------- client/script/build-browser-config.js | 4 +- 4 files changed, 5 insertions(+), 343 deletions(-) delete mode 100644 api/lib/tasks/manifold_migrator.thor diff --git a/api/app/controllers/health_controller.rb b/api/app/controllers/health_controller.rb index 4202a9ae3e..60988f2334 100644 --- a/api/app/controllers/health_controller.rb +++ b/api/app/controllers/health_controller.rb @@ -1,3 +1,5 @@ +# frozen_string_literal: true + class HealthController < ApplicationController def show render json: { ok: true } diff --git a/api/config/initializers/rack_attack.rb b/api/config/initializers/rack_attack.rb index 8ae08481f1..180aab7f4e 100644 --- a/api/config/initializers/rack_attack.rb +++ b/api/config/initializers/rack_attack.rb @@ -5,7 +5,7 @@ "10.244.0.0/16", "10.245.0.0/16", "10.246.0.0/16" -] +].freeze # :nocov: # We want to ensure that the public IP used by the client is never diff --git a/api/lib/tasks/manifold_migrator.thor b/api/lib/tasks/manifold_migrator.thor deleted file mode 100644 index 84d740f3b9..0000000000 --- a/api/lib/tasks/manifold_migrator.thor +++ /dev/null @@ -1,340 +0,0 @@ 
-require_relative "../../config/environment" - -class ManifoldMigrator < Thor - ENV_FILE_OPTIONS = [ - "~/environment/01_unmanaged.env", - Rails.root.join(".env").to_s - ] - - desc "migrate_uploads", "Dewit" - def migrate_uploads - MirrorUploads.dewit - end - - desc "setup", "Set up this Manifold instance for asset migration" - def setup - say cool_manifold_logo, :green - - say <<~TEXT, :magenta, true - Hello! Welcome to the Manifold Migrator CLI Tool! - - TEXT - - sleep 3 - - say <<~TEXT, :magenta - This tool will help you prepare to migrate your Manifold instance from a bare-metal installation (i.e. .deb package) - to a modern Dockerized deployment. - - TEXT - - say process_outline - - sleep 3 - - @domain = ask("But first, what will be the host name of your new Manifold instance? (Default: #{ENV["DOMAIN"]})").presence || ENV["DOMAIN"] - - say "Step #1: Clone uploads to a cloud service", :bold - say "-----------------------------------------\n\n" - - sleep 2 - - @storage = Storage::Factory - - if @storage.primary_store_file? - say <<~TEXT, :bold - Your Manifold instance is currently using local file storage. This is not supported in a Dockerized hosting environment. - - TEXT - - setup_s3 - elsif @storage.primary_store_cloud? - setup_s3 if no? <<~TEXT, :bold - Looks like you're already using a cloud-hosted bucket for uploads. Great! Would you like to continue using this service - for your new Manifold instance? (y/n) - TEXT - end - - say "Step #2: Environment Variables", :bold - say "------------------------------\n\n" - - say <<~TEXT - Copy the following environment variables into your new hosting provider. - - TEXT - - say <<~TEXT, :yellow - NOTE: This list may not be complete! Check your environment and configuration carefully. - - TEXT - - say transition_env_vars - - rescue Interrupt - say "Ok I love you bye bye!", :red - exit 1 - end - - private - - def continue?(message = "Do you want to continue? (y/n)", color = :bold, on_exit = "Exiting. 
Hope to see you soon!", on_exit_color = :red) - if no? message, color - say(on_exit, on_exit_color) - exit 0 - else - say "\n" - end - end - - def setup_s3 - say <<~TEXT - The first step in this migration is to ensure all uploaded assets are moved to a S3-compatible storage bucket. To do that, - we'll set up an upload mirror to copy uploads to an S3-compatible service. - - In order to continue, you need to have a bucket set up in S3 or a S3-compatible system such as Digital Ocean Spaces. - You also need credentials handy for a user that has read/write access to the bucket. - - TEXT - - say "NOTE: This tool does not support setting up GCS buckets. If you'd like to use GCS, please set it up manually.\n", :yellow - - if @storage.mirror_store.present? - if @storage.mirror_store_file? - say <<~TEXT, :red - It looks like you already have an upload mirror defined, but it refers to a local file store. In order to continue, - we'll need to detach this mirror and replace it with an S3-compatible cloud storage service. - - TEXT - - continue? - else - say <<~TEXT, :red - It looks like you already have an upload mirror defined, and it points to a cloud service. - - TEXT - - if no? <<~TEXT, :bold - Would you like to use this mirror as the primary store for your new Manifold instance? (y/n) - TEXT - continue? "Ok, we can set up a new one, but it will detach your current mirror. Would you like to continue? (y/n)" - else - return - end - end - end - - collect_s3_mirror_info - - say <<~TEXT - Okay, we're setting things up to start mirroring uploads to your bucket. Hold on... - - TEXT - - setup_s3_mirror_env - - maybe_restart_and_enqueue_mirror_job - end - - def maybe_restart_and_enqueue_mirror_job - say <<~TEXT, :yellow - We are now ready to start mirroring uploads. But first, we need to restart the Manifold API. - - Once this happens, all existing uploaded files will start to be copied to the mirror in a background thread. - One thread at a time will be used for this job. 
Manifold will continue to function, but background jobs may be slower to complete. - TEXT - - return if no? "Do you want to restart Manifold and start uploading now? Manifold will be briefly unavailable while it restarts. (y/n)", :bold - - say "Startin' the jerb" - - UploadMigrationJob.perform_later - - say "Restarting..." - - puts `sudo service restart manifold_api` - end - - def collect_s3_mirror_info - say "Okay, let's collect that data from you." - - @bucket_name = ask("What's the name of the bucket you'll be migrating to?") - @region = ask("What's the region?") - @access_key_id = ask("What's the Access Key Id?") - @secret_access_key = ask("What's the Secret Access Key?") - @endpoint = ask("If you're using a service other than Amazon S3 (i.e. Digital Ocean Spaces), what's the endpoint URL?").presence - @force_path_style = yes?("Force path style URLs? (y/n)", :bold) - - say <<~TEXT - - Bucket Name: #{@bucket_name} - Region: #{@region} - Access Key ID: #{@access_key_id} - Secret Access Key: #{@secret_access_key} - Endpoint: #{@endpoint || "default"} - Force Path Style: #{@force_path_style.to_s} - - TEXT - - collect_s3_info if no? "Does this look correct? (y/n)", :bold - end - - def setup_s3_mirror_env - say_status "Processing", "Looking up environment file...", :yellow - env_file_location = ENV_FILE_OPTIONS.find { |fp| File.exist? fp } - - fatal!("Cannot find environment file") if env_file_location.blank? - - env_file = File.open(env_file_location, "w") - - say_status "Success", "Found environment file at #{env_file.path}." 
- - say_status "Processing", "Writing upload mirror variables...", :yellow - - env_file.write(s3_env_vars) - - say_status "Success", "Environment updated" - end - - def s3_env_vars - <<~TEXT - - MANIFOLD_SETTINGS_STORAGE_MIRROR='s3' - MANIFOLD_SETTINGS_STORAGE_MIRROR_BUCKET='#{@bucket_name}' - MANIFOLD_SETTINGS_STORAGE_MIRROR_PREFIX='#{@storage.primary_prefix}' - S3_ACCESS_KEY_ID='#{@access_key_id}' - S3_SECRET_ACCESS_KEY='#{@secret_access_key}' - S3_REGION='#{@region}' - S3_FORCE_PATH_STYLE='#{@force_path_style.to_s.upcase}' - #{@endpoint && "S3_ENDPOINT='#{@endpoint}'"} - TEXT - end - - def fatal!(message) - say_error "#{message}\n", :on_red - say_error "The above error is fatal. Exiting.", :red - exit 1 - end - - # Overrides - - # Default bold for asks - def ask(message, color = :bold) - super - end - - # Content methods - - def transition_env_vars - <<~TEXT - DOMAIN='#{@domain}' - MANIFOLD_SETTINGS_STORAGE_PRIMARY='s3' - MANIFOLD_SETTINGS_STORAGE_PRIMARY_BUCKET='#{@bucket_name}' - MANIFOLD_SETTINGS_STORAGE_CACHE_BUCKET='#{@storage.cache_bucket}' - MANIFOLD_SETTINGS_STORAGE_TUS_BUCKET='#{@storage.tus_bucket}' - MANIFOLD_SETTINGS_STORAGE_PRIMARY_PREFIX='#{@storage.primary_prefix}' - MANIFOLD_SETTINGS_STORAGE_CACHE_PREFIX='#{@storage.cache_prefix}' - MANIFOLD_SETTINGS_STORAGE_TUS_PREFIX='#{@storage.tus_prefix}' - S3_ACCESS_KEY_ID='#{@access_key_id}' - S3_SECRET_ACCESS_KEY='#{@secret_access_key}' - S3_REGION='#{@region}' - S3_FORCE_PATH_STYLE='#{@force_path_style.to_s.upcase}' - #{@endpoint && "S3_ENDPOINT='#{@endpoint}'"} - TEXT - end - - def process_outline - <<~TEXT - In order to seamless move from a bare metal installation to a Dockerized install, we need to do a few things: - - SETUP PHASE: - 1) Clone all uploaded assets to a S3-compatible storage bucket (unless they're already in one) - 2) Extract relevant configuration / environment variables - 3) Set up your new Dockerized hosting environment - - Then, when you're ready to make it happen: - - MIGRATION 
PHASE: - 4) Put this Manifold instance into maintenance/read-only mode - 5) Clone the database to your new hosting environment - 6) Change DNS records to point to the new host - - This tool can help with each step *except* #6. - - TEXT - end - - def cool_manifold_logo - <<-LOGO - Manifold Scholarship - - ╦ - ░░░╬φ╦, ,╓╤ - ░░░╬░░░░░╬╦╦, ,╓╦φ╣╬░░░░ - ░░░ "╚╬░░░░╬▒╦╦, ,╓╦φ╬╬░░░░░╬╩╙" - ░░░ `╙╩╬░░░░╬φ╦╖ ,╓╦φ╣╬░░░░░╬╩╙" - ░░░ "╙╩░░░░░╬╦╦φ╣╬░░░░░╬╩╙" ,╔╦φ - ░░░ ,╓╦╬░░░░░░░░░░░░, ,╓╦φ╬╬░░░░░ - ░░░ ,╓╦φ▒╬░░░░░╬╩╨╠░░░╙╩╬░░░░╬▒╦╦╦φ╬╬░░░░░╩╩╙` - ░░░ ,╦φ╣╬░░░░░╬╩╨" ]░░░ "╙╩░░░░░░░░░ - ░░░╬░░░░░░╩╨" ,╠░░░ "╚╬░░░░░╬╦╦,╔╦ê╬ - ░░╬╩╩" ,╓╦φ╣╬░░░░░░ `╙╩╬░░░░░░░ - ,╓╦φ╣╬░░░░░╬╩╙"╠░░░ `║░░░ - ,╓╦φ╣╬░░░░░╬╩╙" ]░░░ ║░░░ - φ╣╬░░░░░╬╩╨" ,╔╣░░░ ║░░░ - ░╬╩╙` ,╓╦φ╬╬░░░░░░░ ║░░░ - ,╓╦φ╬╬░░░░░╩╩╙` ╠░░░ ║░░░ - ,╓╦φ╣╬░░░░░╩╩"` ]░░░ ║░░░ - ╣╬░░░░░╩╩"` ,╔╦╣░░░ ║░░░ - ╬╩╙` ,╔╦φ╬░░░░░░░░░ ║░░░ - ,╔╦φ╬╬░░░░░╩╩" ]░░░ ║░░░ - ,╔╦φ╣░░░░░░╩╩"` ]░░░ ║░░░ - ░░░░░░╩╩"` ,╦╦╣╬░░░╣╦╦, ║░░░ - ╩"` ,╦╦╣╬░░░░░╬╩╩╙╩╬░░░░╬▒╦╖ ║░░░ - ,,╦╦▒╬░░░░░╬╩╙" `╙╩╬░░░░╬φ╦╓ ║░░░ - ,╦╦ê╣░░░░░░╩╨" "╚╬░░░░░╣╦╦, ║░░░ - ░░░░░╩╩" `╙╩╬░░░░╬╬░░░ - " `╙╩╬░░░ - " - LOGO - end - -end - - - -# This is a long-running job that manages the entire asset migration process -# It will enqueue individual uploads at a pace set by {delay} -# By default, delay is set to process reasonably slow in order to avoid overwhelming the worker -class MirrorUploads - def self.dewit(attachables = nil) - @attachables = attachables || default_attachables - - @attachables.each do |model| - attachments = model.shrine_attachment_configurations.keys - - puts "Mirroring attachments for model #{model.to_s}" - - model.find_each do |record| - attachments.each do |attachment_name| - attacher = record.send("#{attachment_name}_attacher") - next unless attacher.stored? 
- - puts "Putting #{model.to_s} #{record.id} #{attachment_name}" - attacher.file.trigger_mirror_upload - - attacher.map_derivative(attacher.derivatives) do |_, derivative| - derivative.trigger_mirror_upload - end - end - end - end - - end - - def self.default_attachables - ApplicationRecord.descendants.select { |model| model.include? Attachments } - end - -end - -MirrorUploads.dewit diff --git a/client/script/build-browser-config.js b/client/script/build-browser-config.js index 51489a9514..4000231026 100644 --- a/client/script/build-browser-config.js +++ b/client/script/build-browser-config.js @@ -10,9 +10,9 @@ const output = compileEnv(template); const writePath = `${paths.build}/www/`; /* eslint-disable no-console */ -mkdirp(writePath).then(_ => { +mkdirp(writePath).then(ignored => { fs.writeFileSync(`${paths.build}/www/browser.config.js`, output); -}).catch(_ => { +}).catch(err => { console.error("Unable to mkdir at " + writePath + ": " + err); }); /* eslint-enable no-console */ From 1eccb070b89217e225c42f9ff6e292e515460e1c Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 28 Jan 2026 14:50:48 -0800 Subject: [PATCH 26/43] [F] Add release Rake task --- api/db/structure.sql | 4 ++-- api/lib/tasks/release.rake | 10 ++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 api/lib/tasks/release.rake diff --git a/api/db/structure.sql b/api/db/structure.sql index 27707b6730..960904067f 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict aFbNPfJ9QJhNrDvHxcEauL46R8ZoAcf6sxZdkQYngf6yB0PX4fAzrMSUaWBNy8V +\restrict 71IjF0CpVJbmpl2wkO8TeRrlRW0wJcOIZBAi86pXHPlNwLccXXCcqRdUV89aUYj -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -7952,7 +7952,7 @@ ALTER TABLE ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict aFbNPfJ9QJhNrDvHxcEauL46R8ZoAcf6sxZdkQYngf6yB0PX4fAzrMSUaWBNy8V +\unrestrict 
71IjF0CpVJbmpl2wkO8TeRrlRW0wJcOIZBAi86pXHPlNwLccXXCcqRdUV89aUYj SET search_path TO "$user", public; diff --git a/api/lib/tasks/release.rake b/api/lib/tasks/release.rake new file mode 100644 index 0000000000..2be62a2c95 --- /dev/null +++ b/api/lib/tasks/release.rake @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +namespace :manifold do + desc "Performs release tasks - database migration, database reseed, and version upgrade tasks" + task release: :environment do + Rake::Task["db:migrate"].invoke + Rake::Task["db:seed"].invoke + Rake::Task["manifold:upgrade"].invoke + end +end From 040803665d6abe2fb77d5d44ca86fff967cd0bee Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 28 Jan 2026 15:04:00 -0800 Subject: [PATCH 27/43] [C] Bundle binstubs --- bin/bundle | 114 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100755 bin/bundle diff --git a/bin/bundle b/bin/bundle new file mode 100755 index 0000000000..a71368e323 --- /dev/null +++ b/bin/bundle @@ -0,0 +1,114 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'bundle' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require "rubygems" + +m = Module.new do + module_function + + def invoked_as_script? + File.expand_path($0) == File.expand_path(__FILE__) + end + + def env_var_version + ENV["BUNDLER_VERSION"] + end + + def cli_arg_version + return unless invoked_as_script? 
# don't want to hijack other binstubs + return unless "update".start_with?(ARGV.first || " ") # must be running `bundle update` + bundler_version = nil + update_index = nil + ARGV.each_with_index do |a, i| + if update_index && update_index.succ == i && a =~ Gem::Version::ANCHORED_VERSION_PATTERN + bundler_version = a + end + next unless a =~ /\A--bundler(?:[= ](#{Gem::Version::VERSION_PATTERN}))?\z/ + bundler_version = $1 + update_index = i + end + bundler_version + end + + def gemfile + gemfile = ENV["BUNDLE_GEMFILE"] + return gemfile if gemfile && !gemfile.empty? + + File.expand_path("../../Gemfile", __FILE__) + end + + def lockfile + lockfile = + case File.basename(gemfile) + when "gems.rb" then gemfile.sub(/\.rb$/, gemfile) + else "#{gemfile}.lock" + end + File.expand_path(lockfile) + end + + def lockfile_version + return unless File.file?(lockfile) + lockfile_contents = File.read(lockfile) + return unless lockfile_contents =~ /\n\nBUNDLED WITH\n\s{2,}(#{Gem::Version::VERSION_PATTERN})\n/ + Regexp.last_match(1) + end + + def bundler_version + @bundler_version ||= + env_var_version || cli_arg_version || + lockfile_version + end + + def bundler_requirement + return "#{Gem::Requirement.default}.a" unless bundler_version + + bundler_gem_version = Gem::Version.new(bundler_version) + + requirement = bundler_gem_version.approximate_recommendation + + return requirement unless Gem::Version.new(Gem::VERSION) < Gem::Version.new("2.7.0") + + requirement += ".a" if bundler_gem_version.prerelease? + + requirement + end + + def load_bundler! + ENV["BUNDLE_GEMFILE"] ||= gemfile + + activate_bundler + end + + def activate_bundler + gem_error = activation_error_handling do + gem "bundler", bundler_requirement + end + return if gem_error.nil? + require_error = activation_error_handling do + require "bundler/version" + end + return if require_error.nil? 
&& Gem::Requirement.new(bundler_requirement).satisfied_by?(Gem::Version.new(Bundler::VERSION)) + warn "Activating bundler (#{bundler_requirement}) failed:\n#{gem_error.message}\n\nTo install the version of bundler this project requires, run `gem install bundler -v '#{bundler_requirement}'`" + exit 42 + end + + def activation_error_handling + yield + nil + rescue StandardError, LoadError => e + e + end +end + +m.load_bundler! + +if m.invoked_as_script? + load Gem.bin_path("bundler", "bundle") +end From 86fb189c64fc8a8f7d21fbb018842183f3cbe387 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 28 Jan 2026 15:04:18 -0800 Subject: [PATCH 28/43] [B] Fix error in fetching Shrine model properties for upgrade task --- api/app/services/storage/shrine_properties.rb | 10 ++++++---- api/db/structure.sql | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/api/app/services/storage/shrine_properties.rb b/api/app/services/storage/shrine_properties.rb index 6da588b0e1..9ec6b99b6b 100644 --- a/api/app/services/storage/shrine_properties.rb +++ b/api/app/services/storage/shrine_properties.rb @@ -23,16 +23,18 @@ def shrine_columns_for(model) def models tables.map do |table| - table.classify.constantize - rescue StandardError - nil - end.compact + plural_models + table.classify.safe_constantize + end.compact + plural_models - excluded_models end def plural_models [Settings] end + def excluded_models + [GoodJob] + end + def tables ActiveRecord::Base.connection.tables - %w(schema_migrations comment_hierarchies comments) + %w(settings) end diff --git a/api/db/structure.sql b/api/db/structure.sql index 960904067f..f1d7663b76 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict 71IjF0CpVJbmpl2wkO8TeRrlRW0wJcOIZBAi86pXHPlNwLccXXCcqRdUV89aUYj +\restrict icifZWZLPtT6Lj46Ay9keEYrjf2KGl0QqeEq4XbrArOKmRHf8Bo8itDhFf6QETT -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -7952,7 +7952,7 @@ 
ALTER TABLE ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict 71IjF0CpVJbmpl2wkO8TeRrlRW0wJcOIZBAi86pXHPlNwLccXXCcqRdUV89aUYj +\unrestrict icifZWZLPtT6Lj46Ay9keEYrjf2KGl0QqeEq4XbrArOKmRHf8Bo8itDhFf6QETT SET search_path TO "$user", public; From c012da48b29141d426deb162d6967dc12ee61272 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 9 Feb 2026 13:41:04 -0800 Subject: [PATCH 29/43] [F] Remove Redis dependency - Replaces Redis with SolidCache / ActiveRecord tables as appropriate --- .gitignore | 2 + api/Gemfile | 7 +- api/Gemfile.lock | 33 +--- .../attachments/process_attachment_job.rb | 6 +- .../purge_legacy_caches_job.rb | 29 ---- .../jobs/resource_import_rows/import_job.rb | 11 +- .../backport_search_index_job.rb | 2 +- api/app/lib/statistics.rb | 19 +-- api/app/models/statistics_record.rb | 18 +++ api/bin/ensure-db | 3 +- api/config/application.rb | 2 +- api/config/database.yml | 45 ++++-- api/config/initializers/10_redis.rb | 5 - api/config/initializers/40_traffic_control.rb | 20 --- api/config/initializers/rack_attack.rb | 107 +++++++------ api/config/puma.rb | 7 - api/config/solid_cache.yml | 21 +++ ..._create_solid_cache_entries.solid_cache.rb | 12 ++ ...size_to_solid_cache_entries.solid_cache.rb | 9 ++ ...ints_to_solid_cache_entries.solid_cache.rb | 12 ++ ...ex_from_solid_cache_entries.solid_cache.rb | 8 + api/db/cache_structure.sql | 141 ++++++++++++++++++ ...0260209183815_create_statistics_records.rb | 14 ++ api/db/structure.sql | 35 ++++- api/lib/manifold_env.rb | 5 - api/lib/manifold_env/oauth_provider.rb | 13 +- api/lib/manifold_env/rate_limiting.rb | 23 ++- api/lib/manifold_env/redis_config.rb | 56 ------- api/lib/tasks/release.rake | 3 +- .../purge_legacy_caches_job_spec.rb | 11 -- api/spec/rails_helper.rb | 5 - docker-compose.yml | 46 +----- docker/manifold.env | 10 +- 33 files changed, 414 insertions(+), 326 deletions(-) delete mode 100644 api/app/jobs/formatted_attributes/purge_legacy_caches_job.rb 
create mode 100644 api/app/models/statistics_record.rb delete mode 100644 api/config/initializers/10_redis.rb delete mode 100644 api/config/initializers/40_traffic_control.rb create mode 100644 api/config/solid_cache.yml create mode 100644 api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb create mode 100644 api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb create mode 100644 api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb create mode 100644 api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb create mode 100644 api/db/cache_structure.sql create mode 100644 api/db/migrate/20260209183815_create_statistics_records.rb delete mode 100644 api/lib/manifold_env/redis_config.rb delete mode 100644 api/spec/jobs/formatted_attributes/purge_legacy_caches_job_spec.rb diff --git a/.gitignore b/.gitignore index 668baf3934..a0cd7a8420 100644 --- a/.gitignore +++ b/.gitignore @@ -64,3 +64,5 @@ backup-local*.tar /docker/minio/client/* !/docker/minio/client/.keep !/docker/minio/client/initialize.sh + +CLAUDE.md \ No newline at end of file diff --git a/api/Gemfile b/api/Gemfile index 84e96606c1..672cd6c62d 100644 --- a/api/Gemfile +++ b/api/Gemfile @@ -6,8 +6,6 @@ gem "aasm", "~> 4.0" gem "absolute_time", "~> 1.0.0" gem "active_interaction", "~> 4.0" gem "activejob-retry", "~> 0.4" -gem "activejob-traffic_control", "~>0.1.0" -gem "activejob-uniqueness", "~> 0.2.4" gem "activerecord-import", "~> 1.4.1" gem "active_record_upsert", "~> 0.11.1" gem "acts_as_list", "~> 1.2.4" @@ -56,6 +54,7 @@ gem "fast_jsonapi", git: "https://github.com/ManifoldScholar/fast_jsonapi.git", gem "filesize", "~> 0.1" gem "format_engine", "~> 0.7.7" gem "friendly_id", "~> 5.0" +gem "solid_cache", "~> 0.7" gem "fuzzy_match", "~> 2.1.0" gem "geocoder", "1.8.2" gem "gepub", "~> 1.0.4" @@ -105,9 +104,6 @@ gem 
"rack-cors", "~> 1.0" gem "rails", "~> 7.0" gem "rainbow", "~> 3.0" gem "redcarpet", "~> 3.5" -gem "redis", "~> 4.5" -gem "redis-namespace", "~> 1.0" -gem "redis-objects", "~> 1.4" gem "rolify", "~> 5.1" gem "rubyzip", "~> 2.3.1" gem "scanf", "~> 1.0.0" @@ -169,7 +165,6 @@ end group :test do gem "database_cleaner-active_record", "~> 2.1.0" - gem "database_cleaner-redis", "~> 2.0" gem "test-prof", "~> 1.0" gem "with_model", "~> 2.1" end diff --git a/api/Gemfile.lock b/api/Gemfile.lock index b57e2cf890..b76321ce40 100644 --- a/api/Gemfile.lock +++ b/api/Gemfile.lock @@ -77,13 +77,6 @@ GEM activejob-retry (0.6.3) activejob (>= 4.2) activesupport (>= 4.2) - activejob-traffic_control (0.1.3) - activejob (>= 4.2) - activesupport (>= 4.2) - suo - activejob-uniqueness (0.2.5) - activejob (>= 4.2, < 7.1) - redlock (>= 1.2, < 2) activemodel (7.0.8.7) activesupport (= 7.0.8.7) activemodel-serializers-xml (1.0.3) @@ -192,9 +185,6 @@ GEM activerecord (>= 5.a) database_cleaner-core (~> 2.0.0) database_cleaner-core (2.0.1) - database_cleaner-redis (2.0.0) - database_cleaner-core (~> 2.0.0) - redis date (3.3.3) declarative (0.0.20) device_detector (1.0.7) @@ -495,7 +485,6 @@ GEM minitest (5.20.0) money (6.16.0) i18n (>= 0.6.4, <= 2) - msgpack (1.7.2) multi_json (1.15.0) multi_xml (0.6.0) multipart-post (2.3.0) @@ -639,13 +628,6 @@ GEM ffi (~> 1.0) rbtree (0.4.6) redcarpet (3.6.0) - redis (4.5.1) - redis-namespace (1.11.0) - redis (>= 4) - redis-objects (1.7.0) - redis - redlock (1.3.2) - redis (>= 3.0.0, < 6.0) regexp_parser (2.10.0) representable (3.2.0) declarative (< 0.1.0) @@ -777,6 +759,10 @@ GEM snaky_hash (2.0.1) hashie version_gem (~> 1.1, >= 1.1.1) + solid_cache (0.7.0) + activejob (>= 7) + activerecord (>= 7) + railties (>= 7) sorted_set (1.0.3) rbtree set (~> 1.0) @@ -793,10 +779,6 @@ GEM activerecord (>= 5.2) strip_attributes (1.13.0) activemodel (>= 3.0, < 8.0) - suo (0.4.0) - dalli - msgpack - redis systemu (2.6.5) terminal-table (3.0.2) unicode-display_width (>= 
1.1.1, < 3) @@ -866,8 +848,6 @@ DEPENDENCIES active_interaction (~> 4.0) active_record_upsert (~> 0.11.1) activejob-retry (~> 0.4) - activejob-traffic_control (~> 0.1.0) - activejob-uniqueness (~> 0.2.4) activerecord-import (~> 1.4.1) acts-as-taggable-on (~> 9.0) acts_as_list (~> 1.2.4) @@ -894,7 +874,6 @@ DEPENDENCIES dalli (= 2.7.11) data_uri_revived database_cleaner-active_record (~> 2.1.0) - database_cleaner-redis (~> 2.0) dotenv-rails (~> 2.0) draper (~> 4.0.3) dry-core (~> 1.0) @@ -969,9 +948,6 @@ DEPENDENCIES rails (~> 7.0) rainbow (~> 3.0) redcarpet (~> 3.5) - redis (~> 4.5) - redis-namespace (~> 1.0) - redis-objects (~> 1.4) rolify (~> 5.1) rspec-collection_matchers (~> 1.2.0) rspec-core (~> 3.12) @@ -999,6 +975,7 @@ DEPENDENCIES simplecov sinatra (~> 2.2) sixarm_ruby_unaccent (~> 1.2.2) + solid_cache (~> 0.7) sorted_set (~> 1.0, >= 1.0.3) spring spring-commands-rspec (~> 1.0.4) diff --git a/api/app/jobs/attachments/process_attachment_job.rb b/api/app/jobs/attachments/process_attachment_job.rb index e9fe6f83ed..5c3395823f 100644 --- a/api/app/jobs/attachments/process_attachment_job.rb +++ b/api/app/jobs/attachments/process_attachment_job.rb @@ -3,8 +3,12 @@ module Attachments class ProcessAttachmentJob < ApplicationJob include ExclusiveJob + include GoodJob::ActiveJobExtensions::Concurrency - concurrency 1, drop: false unless Rails.env.test? 
+ good_job_control_concurrency_with( + perform_limit: 1, + key: -> { "ProcessAttachmentJob:#{arguments.first.values_at('record').flatten.join(':')}" } + ) discard_on ActiveJob::DeserializationError, ActiveRecord::RecordNotFound diff --git a/api/app/jobs/formatted_attributes/purge_legacy_caches_job.rb b/api/app/jobs/formatted_attributes/purge_legacy_caches_job.rb deleted file mode 100644 index ea7accdb7f..0000000000 --- a/api/app/jobs/formatted_attributes/purge_legacy_caches_job.rb +++ /dev/null @@ -1,29 +0,0 @@ -# frozen_string_literal: true - -module FormattedAttributes - class PurgeLegacyCachesJob < ApplicationJob - queue_as :default - - PATTERNS = %w[ - */plaintext/* - */formatted/* - ].freeze - - # @return [void] - def perform - redis = Redis.new - - PATTERNS.each do |pattern| - cursor = 0 - - loop do - cursor, keys = redis.scan cursor, match: pattern - - redis.del *keys if keys.any? - - break if cursor == "0" - end - end - end - end -end diff --git a/api/app/jobs/resource_import_rows/import_job.rb b/api/app/jobs/resource_import_rows/import_job.rb index 049b966540..3d026960de 100644 --- a/api/app/jobs/resource_import_rows/import_job.rb +++ b/api/app/jobs/resource_import_rows/import_job.rb @@ -3,11 +3,12 @@ # Simple job to process a resource import row module ResourceImportRows class ImportJob < ApplicationJob - # Our acceptance tests use perform_now, which break if this is throttled. - unless Rails.env.test? 
- # concurrency 6, drop: false - throttle threshold: 3, period: 0.5.seconds, drop: false - end + include GoodJob::ActiveJobExtensions::Concurrency + + good_job_control_concurrency_with( + perform_limit: 3, + key: -> { "ResourceImportRows::ImportJob" } + ) queue_as :low_priority diff --git a/api/app/jobs/text_section_nodes/backport_search_index_job.rb b/api/app/jobs/text_section_nodes/backport_search_index_job.rb index c5675a999c..55575149ea 100644 --- a/api/app/jobs/text_section_nodes/backport_search_index_job.rb +++ b/api/app/jobs/text_section_nodes/backport_search_index_job.rb @@ -6,7 +6,7 @@ class BackportSearchIndexJob < ApplicationJob queue_as :low_priority - unique :until_executed, lock_ttl: 2.days, on_conflict: :log + unique_job! by: :job def build_enumerator(cursor:) enumerator_builder.active_record_on_records( diff --git a/api/app/lib/statistics.rb b/api/app/lib/statistics.rb index 636f3e7511..0454ad8c38 100644 --- a/api/app/lib/statistics.rb +++ b/api/app/lib/statistics.rb @@ -9,13 +9,10 @@ class Statistics include ActiveModel::Conversion include Authority::Abilities include SerializedAbilitiesFor - include Redis::Objects DOWNLOAD_EVENT_NAMES = %w(download_project download_journal).freeze - - lock :transaction, timeout: 1, expiration: 15 - value :this_week, marshal: true - value :last_week, marshal: true + THIS_WEEK_KEY = "statistics:this_week" + LAST_WEEK_KEY = "statistics:last_week" def id 0 @@ -24,30 +21,30 @@ def id # @!attribute [rw] readers_this_week # @return [Float] def readers_this_week - this_week.value.to_f + StatisticsRecord.get(THIS_WEEK_KEY).to_f end def readers_this_week=(new_value) - this_week.value = new_value.to_f + StatisticsRecord.set(THIS_WEEK_KEY, new_value.to_f) end # @!attribute [rw] readers_last_week # @return [Float] def readers_last_week - last_week.value.to_f + StatisticsRecord.get(LAST_WEEK_KEY).to_f end def readers_last_week=(new_value) - last_week.value = new_value + StatisticsRecord.set(LAST_WEEK_KEY, new_value.to_f) end - 
# Update values in a redis lock. + # Update values within an advisory lock. # # @yieldparam [Statistics] instance the instance itself to update # @yieldreturn [void] # @return [void] def update - transaction_lock.lock do + ApplicationRecord.with_advisory_lock("statistics:update", timeout_seconds: 15) do yield self if block_given? end end diff --git a/api/app/models/statistics_record.rb b/api/app/models/statistics_record.rb new file mode 100644 index 0000000000..c791ff708b --- /dev/null +++ b/api/app/models/statistics_record.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +class StatisticsRecord < ApplicationRecord + validates :key, presence: true, uniqueness: true + validates :value, presence: true + + class << self + def get(key) + find_by(key: key)&.value + end + + def set(key, value) + record = find_or_initialize_by(key: key) + record.update!(value: value) + value + end + end +end diff --git a/api/bin/ensure-db b/api/bin/ensure-db index 2e3ab6d040..ec3832e3e3 100755 --- a/api/bin/ensure-db +++ b/api/bin/ensure-db @@ -3,5 +3,6 @@ set -eux bin/rails db:create -bin/rails db:migrate +bin/rails db:migrate:primary +bin/rails db:migrate:cache bin/rails db:seed diff --git a/api/config/application.rb b/api/config/application.rb index e7bcdd78fd..4accccab62 100644 --- a/api/config/application.rb +++ b/api/config/application.rb @@ -113,6 +113,6 @@ class Application < Rails::Application config.active_record.schema_format = :sql - config.cache_store = :redis_cache_store, ManifoldEnv.redis.cache_options + config.cache_store = :solid_cache_store end end diff --git a/api/config/database.yml b/api/config/database.yml index 9bb75a86a1..856870d717 100644 --- a/api/config/database.yml +++ b/api/config/database.yml @@ -5,17 +5,36 @@ port = ENV["RAILS_DB_PORT"] || 5432 db = ENV["RAILS_DB_NAME"] || 'manifold_development' test_db = ENV["RAILS_TEST_DB_NAME"] || 'manifold_test' + + cache_user = ENV["RAILS_CACHE_DB_USER"] + cache_pass = ENV["RAILS_CACHE_DB_PASS"] + cache_host = 
ENV["RAILS_CACHE_DB_HOST"] || 'localhost' + cache_port = ENV["RAILS_CACHE_DB_PORT"] || 5432 + cache_db = ENV["RAILS_CACHE_DB_NAME"] || 'manifold_cache_development' + cache_test_db = ENV["RAILS_CACHE_TEST_DB_NAME"] || 'manifold_cache_test' %> common: &common - adapter: postgresql - host: <%= host %> - encoding: unicode - pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 50 } %> - port: <%= port %> - user: <%= user %> - database: <%= db %> - password: "<%= pass %>" + primary: &primary + adapter: postgresql + host: <%= host %> + encoding: unicode + pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 50 } %> + port: <%= port %> + user: <%= user %> + database: <%= db %> + password: "<%= pass %>" + cache: &cache + adapter: postgresql + host: <%= cache_host %> + encoding: unicode + pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 50 } %> + port: <%= cache_port %> + user: <%= cache_user %> + database: <%= cache_db %> + password: "<%= cache_pass %>" + migrations_paths: db/cache_migrate + production: <<: *common @@ -30,5 +49,11 @@ development: <<: *common test: - <<: *common - database: <%= test_db %><%= ENV["TEST_ENV_NUMBER"] %> + primary: + <<: *primary + database: <%= test_db %><%= ENV["TEST_ENV_NUMBER"] %> + cache: + <<: *cache + database: <%= cache_test_db %><%= ENV["TEST_ENV_NUMBER"] %> + + diff --git a/api/config/initializers/10_redis.rb b/api/config/initializers/10_redis.rb deleted file mode 100644 index 4009b9c27d..0000000000 --- a/api/config/initializers/10_redis.rb +++ /dev/null @@ -1,5 +0,0 @@ -# frozen_string_literal: true - -# TODO: Revisit in v7, remove redis-namespace. 
-ENV["REDIS_NAMESPACE_QUIET"] = "true" # Disable deprecation warning -Redis::Objects.redis = ManifoldEnv.redis.build_connection_pool diff --git a/api/config/initializers/40_traffic_control.rb b/api/config/initializers/40_traffic_control.rb deleted file mode 100644 index 350acf18eb..0000000000 --- a/api/config/initializers/40_traffic_control.rb +++ /dev/null @@ -1,20 +0,0 @@ -# frozen_string_literal: true - -module Patches - module MakeTrafficControlSupportRedisNamespace - def client_class_type(client) - if client.instance_of?(::Redis::Namespace) - Suo::Client::Redis - else - super - end - end - end -end - -ActiveJob::TrafficControl.singleton_class.prepend( - Patches::MakeTrafficControlSupportRedisNamespace -) -ActiveJob::TrafficControl.client = - ManifoldEnv.redis.build_connection_pool "traffic-control", size: 25 -ActiveJob::TrafficControl.cache_client = Rails.cache diff --git a/api/config/initializers/rack_attack.rb b/api/config/initializers/rack_attack.rb index 180aab7f4e..2020f91494 100644 --- a/api/config/initializers/rack_attack.rb +++ b/api/config/initializers/rack_attack.rb @@ -1,69 +1,68 @@ # frozen_string_literal: true -INTERNAL_IP_RANGES = [ - "10.229.0.0/16", - "10.244.0.0/16", - "10.245.0.0/16", - "10.246.0.0/16" -].freeze - -# :nocov: -# We want to ensure that the public IP used by the client is never -# accidentally blocklisted or throttled. -unless Rails.env.development? || Rails.env.test? - # ManifoldEnv.rate_limiting.derive_public_ips! Rails.application.config.manifold.domain -end +# Needs to run after initialization, else Solid Cache won't be ready +# Revisit after upgrading to Solid Cache 1.0 +ActiveSupport::Reloader.to_prepare do + # :nocov: + # We want to ensure that the public IP used by the client is never + # accidentally blocklisted or throttled. + if !(Rails.env.development? || Rails.env.test?) + if ENV["CLIENT_SERVER_IP"] + Rack::Attack.safelist_ip ENV["CLIENT_SERVER_IP"] + else + ManifoldEnv.rate_limiting.derive_public_ips! 
Rails.application.config.manifold.domain + end + end -INTERNAL_IP_RANGES.each { |ip| Rack::Attack.safelist_ip(ip) } + ManifoldEnv.rate_limiting.public_ips.each do |public_ip| + Rack::Attack.safelist_ip public_ip + end + # :nocov: -ManifoldEnv.rate_limiting.public_ips.each do |public_ip| - Rack::Attack.safelist_ip public_ip -end -# :nocov: + Rack::Attack.safelist("allow all GET requests") do |request| + # We do not currently throttle GET requests. + request.get? + end -Rack::Attack.safelist("allow all GET requests") do |request| - # We do not currently throttle GET requests. - request.get? -end + Rack::Attack.safelist("mark any admin access safe") do |request| + request.env["manifold_env.authorized_admin"] + end -Rack::Attack.safelist("mark any admin access safe") do |request| - request.env["manifold_env.authorized_admin"] -end + Rack::Attack.safelist("skip when disabled globally or per category") do |request| + request.env["manifold_env.rate_limiting_disabled"] + end -Rack::Attack.safelist("skip when disabled globally or per category") do |request| - request.env["manifold_env.rate_limiting_disabled"] -end + ManifoldEnv.rate_limiting.each_throttled_category do |throttler| + Rack::Attack.throttle throttler.email_key, **throttler.options do |request| + request.env["manifold_env.real_email"] if request.env["manifold_env.throttled_category"] == throttler.category + end -ManifoldEnv.rate_limiting.each_throttled_category do |throttler| - Rack::Attack.throttle throttler.email_key, **throttler.options do |request| - request.env["manifold_env.real_email"] if request.env["manifold_env.throttled_category"] == throttler.category + Rack::Attack.throttle throttler.ip_key, **throttler.options do |request| + request.ip if request.env["manifold_env.throttled_category"] == throttler.category + end end - Rack::Attack.throttle throttler.ip_key, **throttler.options do |request| - request.ip if request.env["manifold_env.throttled_category"] == throttler.category + 
ActiveSupport::Notifications.subscribe("blocklist.rack_attack") do |name, start, finish, request_id, payload| + # :nocov: + ThrottledRequest.track! payload[:request] + # :nocov: end -end -ActiveSupport::Notifications.subscribe("blocklist.rack_attack") do |name, start, finish, request_id, payload| - # :nocov: - ThrottledRequest.track! payload[:request] - # :nocov: -end - -ActiveSupport::Notifications.subscribe("throttle.rack_attack") do |name, start, finish, request_id, payload| - # :nocov: - ThrottledRequest.track! payload[:request] - # :nocov: -end + ActiveSupport::Notifications.subscribe("throttle.rack_attack") do |name, start, finish, request_id, payload| + # :nocov: + ThrottledRequest.track! payload[:request] + # :nocov: + end -Rack::Attack.blocklisted_responder = lambda do |request| - # :nocov: - [503, {}, ["Internal Server Error\n"]] - # :nocov: -end + Rack::Attack.blocklisted_responder = lambda do |request| + # :nocov: + [503, {}, ["Internal Server Error\n"]] + # :nocov: + end -Rack::Attack.throttled_responder = lambda do |request| - # :nocov: - [503, {}, ["Internal Server Error\n"]] - # :nocov: + Rack::Attack.throttled_responder = lambda do |request| + # :nocov: + [503, {}, ["Internal Server Error\n"]] + # :nocov: + end end diff --git a/api/config/puma.rb b/api/config/puma.rb index ba38a6f88c..70fb878c07 100644 --- a/api/config/puma.rb +++ b/api/config/puma.rb @@ -63,13 +63,6 @@ ActiveSupport.on_load(:active_record) do ActiveRecord::Base.connection.disconnect! end - - # Ensure we disconnect from Rails cache on forking. - Rails.cache.redis.disconnect! - - Redis.current.disconnect! - - Redis::Objects.redis.disconnect! 
end on_worker_boot do diff --git a/api/config/solid_cache.yml b/api/config/solid_cache.yml new file mode 100644 index 0000000000..1ef713acde --- /dev/null +++ b/api/config/solid_cache.yml @@ -0,0 +1,21 @@ +default: &default + database: cache + store_options: + max_age: <%= 1.week.to_i %> + max_size: <%= 256.megabytes %> + namespace: <%= ENV.fetch("RAILS_CACHE_NAMESPACE", "manifold") %> + +production: + <<: *default + +demo: + <<: *default + +staging: + <<: *default + +development: + <<: *default + +test: + <<: *default diff --git a/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb new file mode 100644 index 0000000000..4ff1fbd4d3 --- /dev/null +++ b/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb @@ -0,0 +1,12 @@ +# This migration comes from solid_cache (originally 20230724121448) +class CreateSolidCacheEntries < ActiveRecord::Migration[7.0] + def change + create_table :solid_cache_entries do |t| + t.binary :key, null: false, limit: 1024 + t.binary :value, null: false, limit: 512.megabytes + t.datetime :created_at, null: false + + t.index :key, unique: true + end + end +end diff --git a/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb new file mode 100644 index 0000000000..b56e847efd --- /dev/null +++ b/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb @@ -0,0 +1,9 @@ +# This migration comes from solid_cache (originally 20240108155507) +class AddKeyHashAndByteSizeToSolidCacheEntries < ActiveRecord::Migration[7.0] + def change + change_table :solid_cache_entries do |t| + t.column :key_hash, :integer, null: true, limit: 8 + t.column :byte_size, :integer, null: true, limit: 4 + end + end +end diff --git 
a/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb new file mode 100644 index 0000000000..eadfe43cc3 --- /dev/null +++ b/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb @@ -0,0 +1,12 @@ +# This migration comes from solid_cache (originally 20240110111600) +class AddKeyHashAndByteSizeIndexesAndNullConstraintsToSolidCacheEntries < ActiveRecord::Migration[7.0] + def change + change_table :solid_cache_entries, bulk: true do |t| + t.change_null :key_hash, false + t.change_null :byte_size, false + t.index :key_hash, unique: true + t.index [:key_hash, :byte_size] + t.index :byte_size + end + end +end diff --git a/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb new file mode 100644 index 0000000000..72ee61339d --- /dev/null +++ b/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb @@ -0,0 +1,8 @@ +# This migration comes from solid_cache (originally 20240110111702) +class RemoveKeyIndexFromSolidCacheEntries < ActiveRecord::Migration[7.0] + def change + change_table :solid_cache_entries do |t| + t.remove_index :key, unique: true + end + end +end diff --git a/api/db/cache_structure.sql b/api/db/cache_structure.sql new file mode 100644 index 0000000000..51aba49758 --- /dev/null +++ b/api/db/cache_structure.sql @@ -0,0 +1,141 @@ +\restrict IkSphKZuNuXvwhE2tOOX2iTEsemY0b2IYFKptabYU9c4s719I6QMLJTYTP1ZSJa + +-- Dumped from database version 13.22 +-- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET 
client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: ar_internal_metadata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.ar_internal_metadata ( + key character varying NOT NULL, + value character varying, + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL +); + + +-- +-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.schema_migrations ( + version character varying NOT NULL +); + + +-- +-- Name: solid_cache_entries; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.solid_cache_entries ( + id bigint NOT NULL, + key bytea NOT NULL, + value bytea NOT NULL, + created_at timestamp(6) without time zone NOT NULL, + key_hash bigint NOT NULL, + byte_size integer NOT NULL +); + + +-- +-- Name: solid_cache_entries_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.solid_cache_entries_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: solid_cache_entries_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.solid_cache_entries_id_seq OWNED BY public.solid_cache_entries.id; + + +-- +-- Name: solid_cache_entries id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.solid_cache_entries ALTER COLUMN id SET DEFAULT nextval('public.solid_cache_entries_id_seq'::regclass); + + +-- +-- Name: ar_internal_metadata ar_internal_metadata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.ar_internal_metadata + ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key); + + +-- +-- Name: schema_migrations schema_migrations_pkey; 
Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.schema_migrations + ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version); + + +-- +-- Name: solid_cache_entries solid_cache_entries_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.solid_cache_entries + ADD CONSTRAINT solid_cache_entries_pkey PRIMARY KEY (id); + + +-- +-- Name: index_solid_cache_entries_on_byte_size; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_solid_cache_entries_on_byte_size ON public.solid_cache_entries USING btree (byte_size); + + +-- +-- Name: index_solid_cache_entries_on_key_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_solid_cache_entries_on_key_hash ON public.solid_cache_entries USING btree (key_hash); + + +-- +-- Name: index_solid_cache_entries_on_key_hash_and_byte_size; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_solid_cache_entries_on_key_hash_and_byte_size ON public.solid_cache_entries USING btree (key_hash, byte_size); + + +-- +-- PostgreSQL database dump complete +-- + +\unrestrict IkSphKZuNuXvwhE2tOOX2iTEsemY0b2IYFKptabYU9c4s719I6QMLJTYTP1ZSJa + +SET search_path TO "$user", public; + +INSERT INTO "schema_migrations" (version) VALUES +('20260209194905'), +('20260209194906'), +('20260209194907'), +('20260209194908'); + + diff --git a/api/db/migrate/20260209183815_create_statistics_records.rb b/api/db/migrate/20260209183815_create_statistics_records.rb new file mode 100644 index 0000000000..d1da1cceb0 --- /dev/null +++ b/api/db/migrate/20260209183815_create_statistics_records.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +class CreateStatisticsRecords < ActiveRecord::Migration[7.0] + def change + create_table :statistics_records, id: :uuid do |t| + t.string :key, null: false + t.float :value, default: 0.0 + + t.timestamps + + t.index :key, unique: true + end + end +end diff --git a/api/db/structure.sql b/api/db/structure.sql index 
f1d7663b76..70931594a0 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict icifZWZLPtT6Lj46Ay9keEYrjf2KGl0QqeEq4XbrArOKmRHf8Bo8itDhFf6QETT +\restrict 1r0TdcfyYP4jSHv5hkezx9QOb0oJOLdlTfCMj7lOlAaTEuigzvtkw4QhAyMw7nP -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -2894,6 +2894,19 @@ CREATE SEQUENCE public.settings_id_seq ALTER SEQUENCE public.settings_id_seq OWNED BY public.settings.id; +-- +-- Name: statistics_records; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.statistics_records ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + key character varying NOT NULL, + value double precision DEFAULT 0.0, + created_at timestamp(6) without time zone NOT NULL, + updated_at timestamp(6) without time zone NOT NULL +); + + -- -- Name: stylesheets; Type: TABLE; Schema: public; Owner: - -- @@ -4272,6 +4285,14 @@ ALTER TABLE ONLY public.settings ADD CONSTRAINT settings_pkey PRIMARY KEY (id); +-- +-- Name: statistics_records statistics_records_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.statistics_records + ADD CONSTRAINT statistics_records_pkey PRIMARY KEY (id); + + -- -- Name: stylesheets stylesheets_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- @@ -6325,6 +6346,13 @@ CREATE INDEX index_roles_on_resource_type_and_resource_id ON public.roles USING CREATE UNIQUE INDEX index_settings_on_singleton_guard ON public.settings USING btree (singleton_guard); +-- +-- Name: index_statistics_records_on_key; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_statistics_records_on_key ON public.statistics_records USING btree (key); + + -- -- Name: index_stylesheets_on_ingestion_source_id; Type: INDEX; Schema: public; Owner: - -- @@ -7952,7 +7980,7 @@ ALTER TABLE ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict icifZWZLPtT6Lj46Ay9keEYrjf2KGl0QqeEq4XbrArOKmRHf8Bo8itDhFf6QETT 
+\unrestrict 1r0TdcfyYP4jSHv5hkezx9QOb0oJOLdlTfCMj7lOlAaTEuigzvtkw4QhAyMw7nP SET search_path TO "$user", public; @@ -8325,6 +8353,7 @@ INSERT INTO "schema_migrations" (version) VALUES ('20251120233556'), ('20251121202033'), ('20251203230443'), -('20251203231940'); +('20251203231940'), +('20260209183815'); diff --git a/api/lib/manifold_env.rb b/api/lib/manifold_env.rb index 535f525890..e3093b7d82 100644 --- a/api/lib/manifold_env.rb +++ b/api/lib/manifold_env.rb @@ -13,7 +13,6 @@ module ManifoldEnv autoload :OauthConfig autoload :OauthProvider autoload :RateLimiting - autoload :RedisConfig autoload :Types end @@ -24,10 +23,6 @@ module ManifoldEnv mattr_accessor :rate_limiting do ManifoldEnv::RateLimiting.new end - - mattr_accessor :redis do - ManifoldEnv::RedisConfig.new - end end ManifoldEnv.eager_load! diff --git a/api/lib/manifold_env/oauth_provider.rb b/api/lib/manifold_env/oauth_provider.rb index 77a8447468..dd96e40408 100644 --- a/api/lib/manifold_env/oauth_provider.rb +++ b/api/lib/manifold_env/oauth_provider.rb @@ -6,21 +6,20 @@ class OauthProvider include Equalizer.new(:name) include ManifoldEnv::HasConfigurationDSL include ActiveModel::Validations - include Redis::Objects - - value :app_id - value :secret CREDENTIAL_KEYS = %i(id secret).freeze validates :credentials, presence: { message: "are unset" } attr_reader :name + attr_accessor :app_id, :secret alias id name def initialize(name) @name = name + @app_id = nil + @secret = nil end def <=>(other) @@ -40,7 +39,7 @@ def =~(other) def credentials return nil unless has_credentials? - custom? ? custom.credentials : [app_id.value, secret.value] + custom? ? custom.credentials : [app_id, secret] end # @!attribute [r] custom @@ -71,7 +70,7 @@ def has_app_id? if custom? custom.client_id.present? else - app_id.value.present? + app_id.present? end end @@ -83,7 +82,7 @@ def has_secret? if custom? custom.client_secret.present? else - secret.value.present? + secret.present? 
end end diff --git a/api/lib/manifold_env/rate_limiting.rb b/api/lib/manifold_env/rate_limiting.rb index 543b735c1b..fbe45cd9c0 100644 --- a/api/lib/manifold_env/rate_limiting.rb +++ b/api/lib/manifold_env/rate_limiting.rb @@ -6,13 +6,14 @@ module ManifoldEnv class RateLimiting include DefinesRateLimits - include Redis::Objects DNS_SERVERS = %w[ 8.8.8.8 8.8.4.4 ].freeze + PUBLIC_IPS_CACHE_KEY = "rate_limiting:public_ips" + map_throttle! :comment_creation, limit: 10, period: 3600 map_throttle! :public_annotation_creation, limit: 5, period: 300 @@ -21,10 +22,6 @@ class RateLimiting map_throttle! :registration, limit: 5, period: 86_400 - # We store the public IP(s) for the Manifold application - # so that the client does not accidentally get throttled. - set :public_ips - def id 1 end @@ -37,6 +34,11 @@ def each_throttled_category end end + # @return [Set] + def public_ips + Rails.cache.read(PUBLIC_IPS_CACHE_KEY) || Set.new + end + # @param [String] domain # @return [void] def derive_public_ips!(domain) @@ -47,7 +49,7 @@ def derive_public_ips!(domain) end rescue Resolv::ResolvError # :nocov: - public_ips.clear + clear_public_ips! # :nocov: end @@ -57,12 +59,17 @@ def derive_public_ips!(domain) # @return [void] def reset_public_ips!(new_ips) if new_ips.present? - self.public_ips = new_ips + Rails.cache.write(PUBLIC_IPS_CACHE_KEY, new_ips.to_set) else - public_ips.clear + clear_public_ips! end end + # @return [void] + def clear_public_ips! 
+ Rails.cache.delete(PUBLIC_IPS_CACHE_KEY) + end + # @api private class Throttler include Dry::Core::Equalizer.new(:category) diff --git a/api/lib/manifold_env/redis_config.rb b/api/lib/manifold_env/redis_config.rb deleted file mode 100644 index ca45bedf2d..0000000000 --- a/api/lib/manifold_env/redis_config.rb +++ /dev/null @@ -1,56 +0,0 @@ -# frozen_string_literal: true - -module ManifoldEnv - class RedisConfig - attr_reader :url - - # @param [String] url - # @param [String] namespace_prefix - def initialize(url: default_url, namespace_prefix: nil) - @url = url - @namespace_prefix = namespace_prefix || default_namespace_prefix - end - - def namespace(*parts) - [@namespace_prefix, *parts].join(":") - end - - def namespaced_url(*parts) - "#{url}/#{namespace(*parts)}" - end - - def cache_options - { - namespace: namespace("cache"), - url: url - } - end - - def sidekiq_options - { - url: url, - namespace: "#{namespace('sidekiq')}:" - } - end - - def build_connection_pool(*namespace_parts, size: 5, timeout: 5) - ConnectionPool.new size: size, timeout: timeout do - build_connection(*namespace_parts) - end - end - - def build_connection(*namespace_parts) - Redis::Namespace.new(namespace(*namespace_parts), redis: Redis.new(url: url)) - end - - private - - def default_namespace_prefix - ENV["RAILS_REDIS_NAMESPACE"] || "manifold" - end - - def default_url - ENV["REDIS_URL"] || ENV["BOXEN_REDIS_URL"] || ENV["RAILS_REDIS_URL"] || "redis://127.0.0.1:6379" - end - end -end diff --git a/api/lib/tasks/release.rake b/api/lib/tasks/release.rake index 2be62a2c95..83dcd5b9d3 100644 --- a/api/lib/tasks/release.rake +++ b/api/lib/tasks/release.rake @@ -3,7 +3,8 @@ namespace :manifold do desc "Performs release tasks - database migration, database reseed, and version upgrade tasks" task release: :environment do - Rake::Task["db:migrate"].invoke + Rake::Task["db:migrate:primary"].invoke + Rake::Task["db:migrate:cache"].invoke Rake::Task["db:seed"].invoke 
Rake::Task["manifold:upgrade"].invoke end diff --git a/api/spec/jobs/formatted_attributes/purge_legacy_caches_job_spec.rb b/api/spec/jobs/formatted_attributes/purge_legacy_caches_job_spec.rb deleted file mode 100644 index 2fec5ef29c..0000000000 --- a/api/spec/jobs/formatted_attributes/purge_legacy_caches_job_spec.rb +++ /dev/null @@ -1,11 +0,0 @@ -# frozen_string_literal: true - -require 'rails_helper' - -RSpec.describe FormattedAttributes::PurgeLegacyCachesJob, type: :job do - it "runs without issue" do - expect do - described_class.perform_now - end.to execute_safely - end -end diff --git a/api/spec/rails_helper.rb b/api/spec/rails_helper.rb index f78e9047ca..3a878affab 100644 --- a/api/spec/rails_helper.rb +++ b/api/spec/rails_helper.rb @@ -110,8 +110,6 @@ # If you are not using ActiveRecord, you can remove this line. ActiveRecord::Migration.maintain_test_schema! -ActiveJob::Uniqueness.test_mode! - TestProf::FactoryDefault.configure do |config| config.preserve_attributes = true config.preserve_traits = true @@ -189,10 +187,7 @@ # Truncate all test database tables before running tests. 
config.before(:suite) do DatabaseCleaner[:active_record].strategy = :transaction - DatabaseCleaner[:redis].strategy = :deletion - DatabaseCleaner[:active_record].clean_with(:truncation) - DatabaseCleaner[:redis].clean_with(:deletion) Scenic.database.views.select(&:materialized).each do |view| Scenic.database.refresh_materialized_view view.name, concurrently: false, cascade: false diff --git a/docker-compose.yml b/docker-compose.yml index fcf1a15a97..730198ea15 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -22,40 +22,6 @@ services: interval: 10s timeout: 5s retries: 5 - redis: - image: redis:6.2.7 - platform: linux/amd64 - environment: - - "ALLOW_EMPTY_PASSWORD=yes" - logging: - driver: json-file - options: - max-size: "10m" - max-file: "10" - restart: unless-stopped - volumes: - - redis-data:/data - healthcheck: - test: ["CMD", "redis-cli", "--raw", "incr", "ping"] - interval: 30s - timeout: 5s - retries: 5 - test-redis: - image: redis:6.2.7 - platform: linux/amd64 - environment: - - "ALLOW_EMPTY_PASSWORD=yes" - logging: - driver: json-file - options: - max-size: "10m" - max-file: "10" - restart: unless-stopped - healthcheck: - test: ["CMD", "redis-cli", "--raw", "incr", "ping"] - interval: 30s - timeout: 5s - retries: 5 migrations: build: context: api @@ -64,8 +30,6 @@ services: depends_on: postgres: condition: service_healthy - redis: - condition: service_healthy env_file: docker/manifold.env logging: driver: json-file @@ -141,8 +105,6 @@ services: condition: service_completed_successfully minio-client: condition: service_completed_successfully - redis: - condition: service_healthy env_file: - ./docker/manifold.env environment: @@ -210,15 +172,13 @@ services: condition: service_healthy migrations: condition: service_completed_successfully - test-redis: - condition: service_healthy env_file: - ./docker/manifold.env environment: - RACK_ENV=test - RAILS_ENV=test - - RAILS_REDIS_URL=redis://test-redis:6379 - - REDIS_URL=redis://test-redis:6379 + - 
DATABASE_URL=postgres://postgres:password@postgres/manifold_test + - CACHE_DATABASE_URL=postgres://postgres:password@postgres/manifold_cache_test logging: driver: json-file options: @@ -246,8 +206,6 @@ volumes: driver: local rails-tmp: driver: local - redis-data: - driver: local uploads: driver: local node_modules: diff --git a/docker/manifold.env b/docker/manifold.env index 7fbe2fca0a..63b6304c60 100644 --- a/docker/manifold.env +++ b/docker/manifold.env @@ -20,13 +20,9 @@ CLIENT_SERVER_PROXIES=true DISABLE_SPRING=always RAILS_ENV=development RAILS_SECRET_KEY=6234a9eada2709680e0db091d48fe7973f6eb23f413d9b5c2b9d17149c9e38e7309a897b6a5231297b89ac6d3c7494d40c7d6454f342c04f8743482f610016aa -RAILS_DB_USER=postgres -RAILS_DB_PASS=password -RAILS_DB_HOST=postgres -RAILS_DB_PORT=5432 -RAILS_DB_NAME=manifold_development -RAILS_REDIS_URL=redis://redis:6379 -REDIS_URL=redis://redis:6379 + +DATABASE_URL="postgres://postgres:password@postgres/manifold_development" +CACHE_DATABASE_URL="postgres://postgres:password@postgres/manifold_cache_development" SERVER_PORT=4000 From a3a8ccbe1e856c04945238b57743adfd0d7f87a7 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 9 Feb 2026 14:02:36 -0800 Subject: [PATCH 30/43] [B] Fix CI --- .github/workflows/continuous_integration.yml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/.github/workflows/continuous_integration.yml b/.github/workflows/continuous_integration.yml index af73dc4fd6..b4f2bfef91 100644 --- a/.github/workflows/continuous_integration.yml +++ b/.github/workflows/continuous_integration.yml @@ -8,10 +8,11 @@ on: env: BROWSERSLIST_IGNORE_OLD_DATA: beQuiet - DATABASE_URL: "postgres://postgres:postgres@localhost:5432/manifold_test" DISABLE_SPRING: yes PATH: /usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin:/usr/local/sbin RAILS_ENV: test + DATABASE_URL: "postgres://postgres:postgres@localhost:5432/manifold_test" + CACHE_DATABASE_URL: "postgres://postgres:postgres@localhost:5432/manifold_cache_test" 
jobs: client-lint: @@ -59,14 +60,8 @@ jobs: env: POSTGRES_USER: postgres POSTGRES_PASSWORD: postgres - POSTGRES_DB: manifold_test options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 - redis: - image: redis:6.2.7-alpine - ports: ["6379:6379"] - options: --entrypoint redis-server - steps: - name: "Checkout code" uses: actions/checkout@v3 @@ -82,7 +77,7 @@ jobs: - name: "Setup test database" working-directory: "./api" - run: bin/rails db:setup + run: bin/rails db:test:prepare - name: "Run API specs" working-directory: "./api" From 878cc73c5d092839c91e63fc8ed32cdc4ccae91e Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Tue, 10 Feb 2026 11:34:52 -0800 Subject: [PATCH 31/43] [C] Lint --- api/Dockerfile | 4 ++-- api/config/initializers/rack_attack.rb | 2 +- ...9194905_create_solid_cache_entries.solid_cache.rb | 2 ++ ...d_byte_size_to_solid_cache_entries.solid_cache.rb | 4 +++- ...constraints_to_solid_cache_entries.solid_cache.rb | 2 ++ ...key_index_from_solid_cache_entries.solid_cache.rb | 2 ++ client/script/build-browser-config.js | 12 +++++++----- client/src/servers/common/ProxyHelper.js | 6 ++++-- 8 files changed, 23 insertions(+), 11 deletions(-) diff --git a/api/Dockerfile b/api/Dockerfile index b16c97c0d7..d8d7b01693 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -64,6 +64,6 @@ RUN bundle install CMD ["bin/puma", "-C", "config/puma.rb"] # There are currently no differences between dev and prod Dockerfiles, but these are here to provide parity with the client Dockerfile -FROM base AS dev +FROM base AS development -FROM base AS prod \ No newline at end of file +FROM base AS production \ No newline at end of file diff --git a/api/config/initializers/rack_attack.rb b/api/config/initializers/rack_attack.rb index 2020f91494..a96f088eeb 100644 --- a/api/config/initializers/rack_attack.rb +++ b/api/config/initializers/rack_attack.rb @@ -6,7 +6,7 @@ # :nocov: # We want to ensure that the public IP used by the client is never # 
accidentally blocklisted or throttled. - if !(Rails.env.development? || Rails.env.test?) + unless Rails.env.development? || Rails.env.test? if ENV["CLIENT_SERVER_IP"] Rack::Attack.safelist_ip ENV["CLIENT_SERVER_IP"] else diff --git a/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb index 4ff1fbd4d3..d52baee79a 100644 --- a/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb +++ b/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb @@ -1,3 +1,5 @@ +# frozen_string_literal: true + # This migration comes from solid_cache (originally 20230724121448) class CreateSolidCacheEntries < ActiveRecord::Migration[7.0] def change diff --git a/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb index b56e847efd..2dd1a5de12 100644 --- a/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb +++ b/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb @@ -1,7 +1,9 @@ +# frozen_string_literal: true + # This migration comes from solid_cache (originally 20240108155507) class AddKeyHashAndByteSizeToSolidCacheEntries < ActiveRecord::Migration[7.0] def change - change_table :solid_cache_entries do |t| + change_table :solid_cache_entries do |t| # rubocop:disable Rails/BulkChangeTable t.column :key_hash, :integer, null: true, limit: 8 t.column :byte_size, :integer, null: true, limit: 4 end diff --git a/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb index eadfe43cc3..a20239eab1 
100644 --- a/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb +++ b/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb @@ -1,3 +1,5 @@ +# frozen_string_literal: true + # This migration comes from solid_cache (originally 20240110111600) class AddKeyHashAndByteSizeIndexesAndNullConstraintsToSolidCacheEntries < ActiveRecord::Migration[7.0] def change diff --git a/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb index 72ee61339d..2f7e1f2486 100644 --- a/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb +++ b/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb @@ -1,3 +1,5 @@ +# frozen_string_literal: true + # This migration comes from solid_cache (originally 20240110111702) class RemoveKeyIndexFromSolidCacheEntries < ActiveRecord::Migration[7.0] def change diff --git a/client/script/build-browser-config.js b/client/script/build-browser-config.js index 4000231026..bcfcb7a1b2 100644 --- a/client/script/build-browser-config.js +++ b/client/script/build-browser-config.js @@ -10,9 +10,11 @@ const output = compileEnv(template); const writePath = `${paths.build}/www/`; /* eslint-disable no-console */ -mkdirp(writePath).then(ignored => { - fs.writeFileSync(`${paths.build}/www/browser.config.js`, output); -}).catch(err => { - console.error("Unable to mkdir at " + writePath + ": " + err); -}); +mkdirp(writePath) + .then(ignored => { + fs.writeFileSync(`${paths.build}/www/browser.config.js`, output); + }) + .catch(err => { + console.error("Unable to mkdir at " + writePath + ": " + err); + }); /* eslint-enable no-console */ diff --git a/client/src/servers/common/ProxyHelper.js 
b/client/src/servers/common/ProxyHelper.js index 8f638148d6..72eb745ad4 100644 --- a/client/src/servers/common/ProxyHelper.js +++ b/client/src/servers/common/ProxyHelper.js @@ -20,7 +20,7 @@ class ProxyHelper { target, logLevel: proxyDebug ? logLevel : "silent", changeOrigin: true, - onError: (err, req, res) => { + onError: (err, req, ignored) => { ch.error( `[Proxy Error] ${this.name} | ${proxyPath} -> ${target} | ${req.method} ${req.url}` ); @@ -42,7 +42,9 @@ class ProxyHelper { `[Proxy Res] ${this.name} | ${req.method} ${req.url} | Status: ${proxyRes.statusCode}` ); if (proxyRes.headers.location) { - ch.info(`[Proxy Res] Redirect Location: ${proxyRes.headers.location}`); + ch.info( + `[Proxy Res] Redirect Location: ${proxyRes.headers.location}` + ); } }; } From 7d1b2f61fa0a3d973d312950bfc2ffb6853af01a Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Tue, 10 Feb 2026 13:44:58 -0800 Subject: [PATCH 32/43] [B] Fix Lockbox key generation in CI tests --- api/config/initializers/25_lockbox.rb | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/api/config/initializers/25_lockbox.rb b/api/config/initializers/25_lockbox.rb index e6b862ef4c..dd4edd5d29 100644 --- a/api/config/initializers/25_lockbox.rb +++ b/api/config/initializers/25_lockbox.rb @@ -1,8 +1,6 @@ # frozen_string_literal: true -secret_key_base = Rails.application.secret_key_base - -secret_key_base = secret_key_base.presence || Lockbox.generate_key if Rails.env.test? +secret_key_base = Rails.env.test? ? 
Lockbox.generate_key : Rails.application.secret_key_base # Consistently ensure that it's a 64-character hexadecimal key enforced_master_key = secret_key_base.gsub(/[^a-zA-Z0-9]+/, "")[/\A([a-zA-Z0-9]{1,64})/, 1].rjust(64, "0") From c9e5f870ea0b44f85b28a0fa7cb005f89e7ede77 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Tue, 17 Feb 2026 14:15:39 -0800 Subject: [PATCH 33/43] [E] Refactor statistics to use Rails cache --- api/app/lib/statistics.rb | 8 ++++---- api/app/models/statistics_record.rb | 18 ------------------ ...20260209183815_create_statistics_records.rb | 14 -------------- 3 files changed, 4 insertions(+), 36 deletions(-) delete mode 100644 api/app/models/statistics_record.rb delete mode 100644 api/db/migrate/20260209183815_create_statistics_records.rb diff --git a/api/app/lib/statistics.rb b/api/app/lib/statistics.rb index 0454ad8c38..fbdf241f21 100644 --- a/api/app/lib/statistics.rb +++ b/api/app/lib/statistics.rb @@ -21,21 +21,21 @@ def id # @!attribute [rw] readers_this_week # @return [Float] def readers_this_week - StatisticsRecord.get(THIS_WEEK_KEY).to_f + Rails.cache.read(THIS_WEEK_KEY).to_f end def readers_this_week=(new_value) - StatisticsRecord.set(THIS_WEEK_KEY, new_value.to_f) + Rails.cache.write(THIS_WEEK_KEY, new_value.to_f) end # @!attribute [rw] readers_last_week # @return [Float] def readers_last_week - StatisticsRecord.get(LAST_WEEK_KEY).to_f + Rails.cache.read(LAST_WEEK_KEY).to_f end def readers_last_week=(new_value) - StatisticsRecord.set(LAST_WEEK_KEY, new_value.to_f) + Rails.cache.write(LAST_WEEK_KEY, new_value.to_f) end # Update values within an advisory lock. 
diff --git a/api/app/models/statistics_record.rb b/api/app/models/statistics_record.rb deleted file mode 100644 index c791ff708b..0000000000 --- a/api/app/models/statistics_record.rb +++ /dev/null @@ -1,18 +0,0 @@ -# frozen_string_literal: true - -class StatisticsRecord < ApplicationRecord - validates :key, presence: true, uniqueness: true - validates :value, presence: true - - class << self - def get(key) - find_by(key: key)&.value - end - - def set(key, value) - record = find_or_initialize_by(key: key) - record.update!(value: value) - value - end - end -end diff --git a/api/db/migrate/20260209183815_create_statistics_records.rb b/api/db/migrate/20260209183815_create_statistics_records.rb deleted file mode 100644 index d1da1cceb0..0000000000 --- a/api/db/migrate/20260209183815_create_statistics_records.rb +++ /dev/null @@ -1,14 +0,0 @@ -# frozen_string_literal: true - -class CreateStatisticsRecords < ActiveRecord::Migration[7.0] - def change - create_table :statistics_records, id: :uuid do |t| - t.string :key, null: false - t.float :value, default: 0.0 - - t.timestamps - - t.index :key, unique: true - end - end -end From ed776d6e8454594b30cd6a96ba69b1ac12a746bc Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Tue, 17 Feb 2026 14:56:12 -0800 Subject: [PATCH 34/43] [C] Regenerate structure.sql --- api/db/structure.sql | 32 ++------------------------------ 1 file changed, 2 insertions(+), 30 deletions(-) diff --git a/api/db/structure.sql b/api/db/structure.sql index 70931594a0..2da37d7691 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict 1r0TdcfyYP4jSHv5hkezx9QOb0oJOLdlTfCMj7lOlAaTEuigzvtkw4QhAyMw7nP +\restrict 7fnUzpj3JzQzbeySgHXVLnFP3tarWQ0mS7j6unEKMZpcf8OwqiqRNuTS1Vni6xm -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -2894,19 +2894,6 @@ CREATE SEQUENCE public.settings_id_seq ALTER SEQUENCE public.settings_id_seq OWNED BY public.settings.id; --- --- Name: statistics_records; 
Type: TABLE; Schema: public; Owner: - --- - -CREATE TABLE public.statistics_records ( - id uuid DEFAULT gen_random_uuid() NOT NULL, - key character varying NOT NULL, - value double precision DEFAULT 0.0, - created_at timestamp(6) without time zone NOT NULL, - updated_at timestamp(6) without time zone NOT NULL -); - - -- -- Name: stylesheets; Type: TABLE; Schema: public; Owner: - -- @@ -4285,14 +4272,6 @@ ALTER TABLE ONLY public.settings ADD CONSTRAINT settings_pkey PRIMARY KEY (id); --- --- Name: statistics_records statistics_records_pkey; Type: CONSTRAINT; Schema: public; Owner: - --- - -ALTER TABLE ONLY public.statistics_records - ADD CONSTRAINT statistics_records_pkey PRIMARY KEY (id); - - -- -- Name: stylesheets stylesheets_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- @@ -6346,13 +6325,6 @@ CREATE INDEX index_roles_on_resource_type_and_resource_id ON public.roles USING CREATE UNIQUE INDEX index_settings_on_singleton_guard ON public.settings USING btree (singleton_guard); --- --- Name: index_statistics_records_on_key; Type: INDEX; Schema: public; Owner: - --- - -CREATE UNIQUE INDEX index_statistics_records_on_key ON public.statistics_records USING btree (key); - - -- -- Name: index_stylesheets_on_ingestion_source_id; Type: INDEX; Schema: public; Owner: - -- @@ -7980,7 +7952,7 @@ ALTER TABLE ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict 1r0TdcfyYP4jSHv5hkezx9QOb0oJOLdlTfCMj7lOlAaTEuigzvtkw4QhAyMw7nP +\unrestrict 7fnUzpj3JzQzbeySgHXVLnFP3tarWQ0mS7j6unEKMZpcf8OwqiqRNuTS1Vni6xm SET search_path TO "$user", public; From e5aa4d39d8c62385255ae229d025c00114817fb8 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Mon, 23 Feb 2026 12:04:52 -0800 Subject: [PATCH 35/43] [B] Fix Good Job concurrency key generation for ProcessAttachmentJob --- api/app/jobs/attachments/process_attachment_job.rb | 2 +- api/db/cache_structure.sql | 4 ++-- api/db/structure.sql | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff 
--git a/api/app/jobs/attachments/process_attachment_job.rb b/api/app/jobs/attachments/process_attachment_job.rb index 5c3395823f..52d960caad 100644 --- a/api/app/jobs/attachments/process_attachment_job.rb +++ b/api/app/jobs/attachments/process_attachment_job.rb @@ -7,7 +7,7 @@ class ProcessAttachmentJob < ApplicationJob good_job_control_concurrency_with( perform_limit: 1, - key: -> { "ProcessAttachmentJob:#{arguments.first.values_at('record').flatten.join(':')}" } + key: -> { "ProcessAttachmentJob:#{arguments.slice(1, 2).flatten.join(':')}" } ) discard_on ActiveJob::DeserializationError, ActiveRecord::RecordNotFound diff --git a/api/db/cache_structure.sql b/api/db/cache_structure.sql index 51aba49758..7adeae253b 100644 --- a/api/db/cache_structure.sql +++ b/api/db/cache_structure.sql @@ -1,4 +1,4 @@ -\restrict IkSphKZuNuXvwhE2tOOX2iTEsemY0b2IYFKptabYU9c4s719I6QMLJTYTP1ZSJa +\restrict 3qs0dn7x2lAiDdJ78j9TPjEfnB42pwWMWe3Luc3keyz7FNntcF1g47wu7y4jDaF -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -128,7 +128,7 @@ CREATE INDEX index_solid_cache_entries_on_key_hash_and_byte_size ON public.solid -- PostgreSQL database dump complete -- -\unrestrict IkSphKZuNuXvwhE2tOOX2iTEsemY0b2IYFKptabYU9c4s719I6QMLJTYTP1ZSJa +\unrestrict 3qs0dn7x2lAiDdJ78j9TPjEfnB42pwWMWe3Luc3keyz7FNntcF1g47wu7y4jDaF SET search_path TO "$user", public; diff --git a/api/db/structure.sql b/api/db/structure.sql index 2da37d7691..88f6c3406a 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict 7fnUzpj3JzQzbeySgHXVLnFP3tarWQ0mS7j6unEKMZpcf8OwqiqRNuTS1Vni6xm +\restrict OI1rgHalIvDF5UL29S8nJbvhRJKIUusBSAEJwbetaiPj6bhTFIJggTwTu84S6lU -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -7952,7 +7952,7 @@ ALTER TABLE ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict 7fnUzpj3JzQzbeySgHXVLnFP3tarWQ0mS7j6unEKMZpcf8OwqiqRNuTS1Vni6xm 
+\unrestrict OI1rgHalIvDF5UL29S8nJbvhRJKIUusBSAEJwbetaiPj6bhTFIJggTwTu84S6lU SET search_path TO "$user", public; From 240ae2bacb71a647c452eb84c7fffbbdc923b48d Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Tue, 10 Mar 2026 15:30:50 -0700 Subject: [PATCH 36/43] [F] Add Docker build and release CI - Creates API and client images and pushes to both GHCR and DOCR --- .github/workflows/docker.yml | 113 ++++++++++++++++++++++++++++++++++ .github/workflows/release.yml | 22 +++++++ client/Dockerfile | 4 +- 3 files changed, 137 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/docker.yml create mode 100644 .github/workflows/release.yml diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000000..b8da1e24f4 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,113 @@ +name: "Build & Push Docker" + +on: + push: + branches: + - next # Next, tags as "next" + - master # Edge, tags as "edge" + - release # Production, tags as "latest" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +env: + GHCR_REGISTRY: ghcr.io + GHCR_IMAGE_NAME: ${{ github.repository }} + MANAGED_REGISTRY: registry.digitalocean.com + MANAGED_IMAGE_NAME: manifold/manifold + +jobs: + build-api: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Login GHCR + uses: docker/login-action@v4 + with: + registry: ${{ env.GHCR_REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Login DOCR + uses: docker/login-action@v4 + with: + registry: ${{ env.MANAGED_REGISTRY }} + username: docr + password: ${{ secrets.DOCR_TOKEN }} + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_IMAGE_NAME }}-api + ${{ env.MANAGED_REGISTRY }}/${{ env.MANAGED_IMAGE_NAME }}-api + tags: | + type=raw,value=latest,enable=${{ 
github.ref_name == 'release' }} + type=raw,value=edge,enable={{ is_default_branch }} + type=ref,event=branch + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Build and push Docker + id: push + uses: docker/build-push-action@v5 + with: + context: ./api + target: production + push: true + cache-from: type=gha + cache-to: type=gha,mode=max + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: + "RAILS_ENV=production" + + build-client: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Login GHCR + uses: docker/login-action@v4 + with: + registry: ${{ env.GHCR_REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Login DOCR + uses: docker/login-action@v4 + with: + registry: ${{ env.MANAGED_REGISTRY }} + username: docr + password: ${{ secrets.DOCR_TOKEN }} + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_IMAGE_NAME }}-client + ${{ env.MANAGED_REGISTRY }}/${{ env.MANAGED_IMAGE_NAME }}-client + tags: | + type=raw,value=latest,enable=${{ github.ref_name == 'release' }} + type=raw,value=edge,enable={{ is_default_branch }} + type=ref,event=branch + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Build and push Docker + id: push + uses: docker/build-push-action@v5 + with: + context: ./client + target: production + push: true + cache-from: type=gha + cache-to: type=gha,mode=max + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: + "RAILS_ENV=production" \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..02d3faf295 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,22 @@ +name: Production Release + +on: + workflow_dispatch: + +jobs: 
+ build: + name: Update Release Branch + runs-on: ubuntu-latest + steps: + - name: "checkout release-candidate" + uses: actions/checkout@v4 + with: + ref: release + + - name: "Push release changes" + env: + GH_TOKEN: ${{ github.token }} + run: | + git fetch + git reset --hard origin/master + git push --force origin release \ No newline at end of file diff --git a/client/Dockerfile b/client/Dockerfile index 71660125db..3636a27bd3 100644 --- a/client/Dockerfile +++ b/client/Dockerfile @@ -5,7 +5,7 @@ COPY ./ /srv/app RUN yarn -FROM base AS dev +FROM base AS development RUN yarn build:dev @@ -13,7 +13,7 @@ FROM base AS dev CMD ["yarn", "run", "watch"] -FROM base AS prod +FROM base AS production RUN yarn build:prod From c76294732f360edfe852eba43370745431f2460b Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Tue, 10 Mar 2026 15:59:12 -0700 Subject: [PATCH 37/43] [C] Apply dependency security patches - Patches Ruby gems nokogiri and job-iteration - Patches NPM packages form-data and sha.js --- api/Gemfile | 4 ++-- api/Gemfile.lock | 10 +++++----- api/db/cache_structure.sql | 4 ++-- api/db/structure.sql | 4 ++-- client/yarn.lock | 26 ++++++++++++++++++-------- docker-compose.yml | 2 +- 6 files changed, 30 insertions(+), 20 deletions(-) diff --git a/api/Gemfile b/api/Gemfile index 672cd6c62d..87726f4876 100644 --- a/api/Gemfile +++ b/api/Gemfile @@ -64,7 +64,7 @@ gem "hashie", "~> 3.0" gem "has_scope", "~> 0.8.1" gem "httparty", "~> 0.21.0" gem "image_processing", "~> 1.12" -gem "job-iteration", "~> 1.10.0" +gem "job-iteration", "~> 1.11.0" gem "jsonb_accessor", "~> 1.0" gem "jwt", "~> 1.5" gem "kaminari", "~> 1.2" @@ -80,7 +80,7 @@ gem "namae", "~>1.0" gem "naught", "~> 1.0" gem "net-sftp", "~> 2.1.2" gem "net-ssh", "~> 5.2.0" -gem "nokogiri", "~> 1.15.4" +gem "nokogiri", "~> 1.19.1" gem "oauth2", "~> 1.3" gem "oj", "~> 3.16" gem "omniauth", "~> 2.1.3" diff --git a/api/Gemfile.lock b/api/Gemfile.lock index b76321ce40..86dd4cd424 100644 --- a/api/Gemfile.lock +++ 
b/api/Gemfile.lock @@ -425,7 +425,7 @@ GEM mini_magick (>= 4.9.5, < 5) ruby-vips (>= 2.0.17, < 3) jmespath (1.6.2) - job-iteration (1.10.0) + job-iteration (1.11.0) activejob (>= 6.1) json (2.10.2) json-schema (5.1.1) @@ -481,7 +481,7 @@ GEM mime-types-data (3.2023.0808) mini_magick (4.12.0) mini_mime (1.1.5) - mini_portile2 (2.8.4) + mini_portile2 (2.8.9) minitest (5.20.0) money (6.16.0) i18n (>= 0.6.4, <= 2) @@ -505,7 +505,7 @@ GEM net-protocol net-ssh (5.2.0) nio4r (2.5.9) - nokogiri (1.15.4) + nokogiri (1.19.1) mini_portile2 (~> 2.8.2) racc (~> 1.4) oauth (1.1.0) @@ -905,7 +905,7 @@ DEPENDENCIES hashie (~> 3.0) httparty (~> 0.21.0) image_processing (~> 1.12) - job-iteration (~> 1.10.0) + job-iteration (~> 1.11.0) jsonb_accessor (~> 1.0) jwt (~> 1.5) kaminari (~> 1.2) @@ -922,7 +922,7 @@ DEPENDENCIES naught (~> 1.0) net-sftp (~> 2.1.2) net-ssh (~> 5.2.0) - nokogiri (~> 1.15.4) + nokogiri (~> 1.19.1) oauth2 (~> 1.3) oj (~> 3.16) omniauth (~> 2.1.3) diff --git a/api/db/cache_structure.sql b/api/db/cache_structure.sql index 7adeae253b..a54c0c31b8 100644 --- a/api/db/cache_structure.sql +++ b/api/db/cache_structure.sql @@ -1,4 +1,4 @@ -\restrict 3qs0dn7x2lAiDdJ78j9TPjEfnB42pwWMWe3Luc3keyz7FNntcF1g47wu7y4jDaF +\restrict XGxSMPRdoINWMteiaXCtwBfCSnq34zW2HRM7bIX6j0GRJUCUvjSvB7Oup6rHI7p -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -128,7 +128,7 @@ CREATE INDEX index_solid_cache_entries_on_key_hash_and_byte_size ON public.solid -- PostgreSQL database dump complete -- -\unrestrict 3qs0dn7x2lAiDdJ78j9TPjEfnB42pwWMWe3Luc3keyz7FNntcF1g47wu7y4jDaF +\unrestrict XGxSMPRdoINWMteiaXCtwBfCSnq34zW2HRM7bIX6j0GRJUCUvjSvB7Oup6rHI7p SET search_path TO "$user", public; diff --git a/api/db/structure.sql b/api/db/structure.sql index 88f6c3406a..12d8768190 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict OI1rgHalIvDF5UL29S8nJbvhRJKIUusBSAEJwbetaiPj6bhTFIJggTwTu84S6lU +\restrict 
xNcXPHOUcET17jSznOdc5dMDiBF4y4WYKle7wEyvYgrJ5dArxRQVo6kaDOXP9C1 -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -7952,7 +7952,7 @@ ALTER TABLE ONLY public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict OI1rgHalIvDF5UL29S8nJbvhRJKIUusBSAEJwbetaiPj6bhTFIJggTwTu84S6lU +\unrestrict xNcXPHOUcET17jSznOdc5dMDiBF4y4WYKle7wEyvYgrJ5dArxRQVo6kaDOXP9C1 SET search_path TO "$user", public; diff --git a/client/yarn.lock b/client/yarn.lock index 405b47501c..f45cafdc02 100644 --- a/client/yarn.lock +++ b/client/yarn.lock @@ -4889,13 +4889,14 @@ foreground-child@^3.1.0: signal-exit "^4.0.1" form-data@^4.0.0: - version "4.0.2" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.2.tgz#35cabbdd30c3ce73deb2c42d3c8d3ed9ca51794c" - integrity sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w== + version "4.0.4" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.4.tgz#784cdcce0669a9d68e94d11ac4eea98088edd2c4" + integrity sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow== dependencies: asynckit "^0.4.0" combined-stream "^1.0.8" es-set-tostringtag "^2.1.0" + hasown "^2.0.2" mime-types "^2.1.12" format@^0.2.0: @@ -5504,7 +5505,7 @@ inflight@^1.0.4: once "^1.3.0" wrappy "1" -inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.1, inherits@~2.0.3, inherits@~2.0.4: +inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.1, inherits@~2.0.3, inherits@~2.0.4: version "2.0.4" resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== @@ -8598,7 +8599,7 @@ safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: resolved 
"https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== -safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@~5.2.0: +safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.1.0, safe-buffer@^5.2.1, safe-buffer@~5.2.0: version "5.2.1" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== @@ -8794,9 +8795,9 @@ setprototypeof@1.2.0: integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw== sha.js@^2.4.11: - version "2.4.11" - resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7" - integrity sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ== + version "2.4.12" + resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.12.tgz#eb8b568bf383dfd1867a32c3f2b74eb52bdbf23f" + integrity sha512-8LzC5+bvI45BjpfXU8V5fdU2mfeKiQe1D1gIMn7XUlF3OTUrpdJpPPH4EMAnF0DsHHdSZqCdSss5qCmJKuiO3w== dependencies: inherits "^2.0.1" safe-buffer "^5.0.1" @@ -9576,6 +9577,15 @@ tmp@^0.0.33: dependencies: os-tmpdir "~1.0.2" +to-buffer@^1.2.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/to-buffer/-/to-buffer-1.2.1.tgz#2ce650cdb262e9112a18e65dc29dcb513c8155e0" + integrity sha512-tB82LpAIWjhLYbqjx3X4zEeHN6M8CiuOEy2JY8SEQVdYRe3CCHOFaqrBW1doLDrfpWhplcW7BL+bO3/6S3pcDQ== + dependencies: + isarray "^2.0.5" + safe-buffer "^5.2.1" + typed-array-buffer "^1.0.3" + to-camel-case@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/to-camel-case/-/to-camel-case-1.0.0.tgz#1a56054b2f9d696298ce66a60897322b6f423e46" diff --git a/docker-compose.yml b/docker-compose.yml index 
730198ea15..c77e9d9b3a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -82,7 +82,7 @@ services: stdin_open: true build: context: client - target: dev + target: development ports: - "13100:13100" # Rescue - "13101:13101" # SSR From bc06faf6118b01296659265e6e98cd3beb3ff175 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Thu, 12 Mar 2026 11:13:06 -0700 Subject: [PATCH 38/43] [F] Add optional override env var for Lockbox master key --- api/config/initializers/25_lockbox.rb | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/api/config/initializers/25_lockbox.rb b/api/config/initializers/25_lockbox.rb index dd4edd5d29..e6908b937f 100644 --- a/api/config/initializers/25_lockbox.rb +++ b/api/config/initializers/25_lockbox.rb @@ -1,8 +1,14 @@ # frozen_string_literal: true -secret_key_base = Rails.env.test? ? Lockbox.generate_key : Rails.application.secret_key_base +lockbox_master_key = if Rails.env.test? + Lockbox.generate_key +elsif ENV["LOCKBOX_MASTER_KEY"].present? 
+ ENV["LOCKBOX_MASTER_KEY"] +else + Rails.application.secret_key_base +end # Consistently ensure that it's a 64-character hexadecimal key -enforced_master_key = secret_key_base.gsub(/[^a-zA-Z0-9]+/, "")[/\A([a-zA-Z0-9]{1,64})/, 1].rjust(64, "0") +enforced_master_key = lockbox_master_key.gsub(/[^a-zA-Z0-9]+/, "")[/\A([a-zA-Z0-9]{1,64})/, 1].rjust(64, "0") Lockbox.master_key = enforced_master_key From 4e02c32a3d3e626219ee0fa332136c3199cc1b77 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 18 Mar 2026 14:03:04 -0700 Subject: [PATCH 39/43] [C] Duplicate healthcheck endpoint to api namespace Allows it to be available to API instances proxied behind /api --- api/config/routes.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api/config/routes.rb b/api/config/routes.rb index f98d80e51a..ff769979bd 100644 --- a/api/config/routes.rb +++ b/api/config/routes.rb @@ -19,7 +19,8 @@ mount GoodJob::Engine => '/api/good_job' end - get "up" => "health#show", as: :health_check + get "up" => "health#show" + get "api/up" => "health#show" get "auth/:provider/callback", to: "oauth#authorize" From 30da7750bf059066ef8b33315a5b494e85f01b7c Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 18 Mar 2026 14:03:07 -0700 Subject: [PATCH 40/43] [B] Update rate limiting to use remote IP instead of request IP Addresses an issue where requests routed through reverse proxies / load balancers were being aggressively throttled due to obscured client IP addresses --- api/config/application.rb | 1 + api/config/initializers/rack_attack.rb | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/api/config/application.rb b/api/config/application.rb index 4accccab62..c793cbbe67 100644 --- a/api/config/application.rb +++ b/api/config/application.rb @@ -83,6 +83,7 @@ class Application < Rails::Application # Skip views, helpers and assets when generating a new resource. 
config.api_only = true + config.middleware.use ActionDispatch::RemoteIp config.middleware.use Rack::MethodOverride config.middleware.use ActionDispatch::Flash config.middleware.use ActionDispatch::Cookies diff --git a/api/config/initializers/rack_attack.rb b/api/config/initializers/rack_attack.rb index a96f088eeb..d617540bc4 100644 --- a/api/config/initializers/rack_attack.rb +++ b/api/config/initializers/rack_attack.rb @@ -38,7 +38,8 @@ end Rack::Attack.throttle throttler.ip_key, **throttler.options do |request| - request.ip if request.env["manifold_env.throttled_category"] == throttler.category + next unless request.env["manifold_env.throttled_category"] == throttler.category + request.env["action_dispatch.remote_ip"]&.calculate_ip || request.ip end end @@ -56,13 +57,13 @@ Rack::Attack.blocklisted_responder = lambda do |request| # :nocov: - [503, {}, ["Internal Server Error\n"]] + [429, {}, ["Rate Limit Exceeded\n"]] # :nocov: end Rack::Attack.throttled_responder = lambda do |request| # :nocov: - [503, {}, ["Internal Server Error\n"]] + [429, {}, ["Rate Limit Exceeded\n"]] # :nocov: end end From 71d5f041c5bfd651240113a68ef3763250d832df Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 18 Mar 2026 14:07:58 -0700 Subject: [PATCH 41/43] [C] Lint --- api/config/initializers/25_lockbox.rb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/api/config/initializers/25_lockbox.rb b/api/config/initializers/25_lockbox.rb index e6908b937f..d27953484d 100644 --- a/api/config/initializers/25_lockbox.rb +++ b/api/config/initializers/25_lockbox.rb @@ -1,12 +1,12 @@ # frozen_string_literal: true lockbox_master_key = if Rails.env.test? - Lockbox.generate_key -elsif ENV["LOCKBOX_MASTER_KEY"].present? - ENV["LOCKBOX_MASTER_KEY"] -else - Rails.application.secret_key_base -end + Lockbox.generate_key + elsif ENV["LOCKBOX_MASTER_KEY"].present? 
+ ENV["LOCKBOX_MASTER_KEY"] + else + Rails.application.secret_key_base + end # Consistently ensure that it's a 64-character hexadecimal key enforced_master_key = lockbox_master_key.gsub(/[^a-zA-Z0-9]+/, "")[/\A([a-zA-Z0-9]{1,64})/, 1].rjust(64, "0") From 841dd7779e58f0addf6060cbbb73f46972e40edc Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Wed, 18 Mar 2026 14:26:25 -0700 Subject: [PATCH 42/43] [C] Update expected rate limited code in specs --- api/db/cache_structure.sql | 4 ++-- api/db/structure.sql | 4 ++-- api/spec/requests/comments_spec.rb | 2 +- api/spec/requests/reading_groups_spec.rb | 2 +- .../requests/text_sections/relationships/annotations_spec.rb | 2 +- api/spec/requests/users_spec.rb | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/api/db/cache_structure.sql b/api/db/cache_structure.sql index a54c0c31b8..4d0d2edded 100644 --- a/api/db/cache_structure.sql +++ b/api/db/cache_structure.sql @@ -1,4 +1,4 @@ -\restrict XGxSMPRdoINWMteiaXCtwBfCSnq34zW2HRM7bIX6j0GRJUCUvjSvB7Oup6rHI7p +\restrict acy6eTEdEyCwlH42B66Wg4BSGl2eqioDwK0CMSORx0X8L8IA6xc2r6aweKCXVYY -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -128,7 +128,7 @@ CREATE INDEX index_solid_cache_entries_on_key_hash_and_byte_size ON public.solid -- PostgreSQL database dump complete -- -\unrestrict XGxSMPRdoINWMteiaXCtwBfCSnq34zW2HRM7bIX6j0GRJUCUvjSvB7Oup6rHI7p +\unrestrict acy6eTEdEyCwlH42B66Wg4BSGl2eqioDwK0CMSORx0X8L8IA6xc2r6aweKCXVYY SET search_path TO "$user", public; diff --git a/api/db/structure.sql b/api/db/structure.sql index 12d8768190..32e1d5616e 100644 --- a/api/db/structure.sql +++ b/api/db/structure.sql @@ -1,4 +1,4 @@ -\restrict xNcXPHOUcET17jSznOdc5dMDiBF4y4WYKle7wEyvYgrJ5dArxRQVo6kaDOXP9C1 +\restrict LNheIDBOnK0QpSehtUWNxnxD3q94AfOg7XcUemfZCwkIiA26sNgFEmPkBcpqG1A -- Dumped from database version 13.22 -- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1) @@ -7952,7 +7952,7 @@ ALTER TABLE ONLY 
public.reading_group_composite_entries -- PostgreSQL database dump complete -- -\unrestrict xNcXPHOUcET17jSznOdc5dMDiBF4y4WYKle7wEyvYgrJ5dArxRQVo6kaDOXP9C1 +\unrestrict LNheIDBOnK0QpSehtUWNxnxD3q94AfOg7XcUemfZCwkIiA26sNgFEmPkBcpqG1A SET search_path TO "$user", public; diff --git a/api/spec/requests/comments_spec.rb b/api/spec/requests/comments_spec.rb index ca4a33d74a..846a00db6a 100644 --- a/api/spec/requests/comments_spec.rb +++ b/api/spec/requests/comments_spec.rb @@ -133,7 +133,7 @@ end.to change(Comment, :count).by(10) .and change(ThrottledRequest, :count).by(1) - expect(response).to have_http_status(:service_unavailable) + expect(response).to have_http_status(:too_many_requests) end context "when the comment is spammy" do diff --git a/api/spec/requests/reading_groups_spec.rb b/api/spec/requests/reading_groups_spec.rb index cff9292134..ec092d7074 100644 --- a/api/spec/requests/reading_groups_spec.rb +++ b/api/spec/requests/reading_groups_spec.rb @@ -149,7 +149,7 @@ def making_the_request end.to change(ReadingGroup, :count).by(10) .and change(ThrottledRequest, :count).by(1) - expect(response).to have_http_status(:service_unavailable) + expect(response).to have_http_status(:too_many_requests) end context "when the user has an unconfirmed email" do diff --git a/api/spec/requests/text_sections/relationships/annotations_spec.rb b/api/spec/requests/text_sections/relationships/annotations_spec.rb index 3ebca8160f..1210d04b58 100644 --- a/api/spec/requests/text_sections/relationships/annotations_spec.rb +++ b/api/spec/requests/text_sections/relationships/annotations_spec.rb @@ -206,7 +206,7 @@ def make_the_request! 
end.to change(Annotation, :count).by(5) .and change(ThrottledRequest, :count).by(1) - expect(response).to have_http_status(:service_unavailable) + expect(response).to have_http_status(:too_many_requests) end context "when the user has not confirmed their email" do diff --git a/api/spec/requests/users_spec.rb b/api/spec/requests/users_spec.rb index 72f73dc18c..e77252c0de 100644 --- a/api/spec/requests/users_spec.rb +++ b/api/spec/requests/users_spec.rb @@ -83,7 +83,7 @@ def make_request!(headers: anonymous_headers, params: valid_params) end.to change(User, :count).by(5) .and change(ThrottledRequest, :count).by(1) - expect(response).to have_http_status(:service_unavailable) + expect(response).to have_http_status(:too_many_requests) end it "tells the welcome mailer that the user was created by the admin when meta[createdByAdmin] is true" do @@ -121,7 +121,7 @@ def make_request!(headers: anonymous_headers, params: valid_params) end.to keep_the_same(User, :count) .and change(ThrottledRequest, :count).by(1) - expect(response).to have_http_status(:service_unavailable) + expect(response).to have_http_status(:too_many_requests) end end end From fa66122196a706785047ed9baef464c526206eb7 Mon Sep 17 00:00:00 2001 From: Tim Frazee Date: Thu, 19 Mar 2026 10:08:49 -0700 Subject: [PATCH 43/43] [F] Add ability to define request header to use as rate limiting key Addresses an issue where proxies using non-standard forwarding headers were being rate limited on the proxy's IP --- api/config/application.rb | 1 + api/config/initializers/rack_attack.rb | 7 ++++++- docker/local.env | 4 ++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/api/config/application.rb b/api/config/application.rb index c793cbbe67..0af697c780 100644 --- a/api/config/application.rb +++ b/api/config/application.rb @@ -28,6 +28,7 @@ module Dotenv class Railtie < Rails::Railtie def load Dotenv.load( + root.join("./.env"), root.join("../.env.local"), root.join("../.env.#{Rails.env}"), 
root.join("../.env") diff --git a/api/config/initializers/rack_attack.rb b/api/config/initializers/rack_attack.rb index d617540bc4..f3e2ccac5c 100644 --- a/api/config/initializers/rack_attack.rb +++ b/api/config/initializers/rack_attack.rb @@ -39,7 +39,12 @@ Rack::Attack.throttle throttler.ip_key, **throttler.options do |request| next unless request.env["manifold_env.throttled_category"] == throttler.category - request.env["action_dispatch.remote_ip"]&.calculate_ip || request.ip + + ENV.fetch("PROXY_CLIENT_IP_HEADER", "").split(/,\s*/).map do |header| + request.get_header(header) + end.push(request.env["action_dispatch.remote_ip"].to_s, request.ip) + .compact_blank + .first end end diff --git a/docker/local.env b/docker/local.env index df4930d926..06ea8120d8 100644 --- a/docker/local.env +++ b/docker/local.env @@ -11,9 +11,9 @@ API_CABLE_PORT=13120 CLIENT_URL=http://localhost:13100 -CLIENT_BROWSER_API_URL=https://web.manifold.orb.local +CLIENT_BROWSER_API_URL=http://localhost:13110 CLIENT_BROWSER_API_CABLE_URL=http://localhost:13120 -CLIENT_SERVER_API_URL=https://web.manifold.orb.local +CLIENT_SERVER_API_URL=http://localhost:13110 CLIENT_SERVER_PROXIES=true