diff --git a/.github/workflows/continuous_integration.yml b/.github/workflows/continuous_integration.yml
index af73dc4fd6..b4f2bfef91 100644
--- a/.github/workflows/continuous_integration.yml
+++ b/.github/workflows/continuous_integration.yml
@@ -8,10 +8,11 @@ on:
env:
BROWSERSLIST_IGNORE_OLD_DATA: beQuiet
- DATABASE_URL: "postgres://postgres:postgres@localhost:5432/manifold_test"
DISABLE_SPRING: yes
PATH: /usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin:/usr/local/sbin
RAILS_ENV: test
+ DATABASE_URL: "postgres://postgres:postgres@localhost:5432/manifold_test"
+ CACHE_DATABASE_URL: "postgres://postgres:postgres@localhost:5432/manifold_cache_test"
jobs:
client-lint:
@@ -59,14 +60,8 @@ jobs:
env:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
- POSTGRES_DB: manifold_test
options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
- redis:
- image: redis:6.2.7-alpine
- ports: ["6379:6379"]
- options: --entrypoint redis-server
-
steps:
- name: "Checkout code"
uses: actions/checkout@v3
@@ -82,7 +77,7 @@ jobs:
- name: "Setup test database"
working-directory: "./api"
- run: bin/rails db:setup
+ run: bin/rails db:test:prepare
- name: "Run API specs"
working-directory: "./api"
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
new file mode 100644
index 0000000000..b8da1e24f4
--- /dev/null
+++ b/.github/workflows/docker.yml
@@ -0,0 +1,113 @@
+name: "Build & Push Docker"
+
+on:
+ push:
+ branches:
+ - next # Next, tags as "next"
+ - master # Edge, tags as "edge"
+ - release # Production, tags as "latest"
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref_name }}
+ cancel-in-progress: true
+
+env:
+ GHCR_REGISTRY: ghcr.io
+ GHCR_IMAGE_NAME: ${{ github.repository }}
+ MANAGED_REGISTRY: registry.digitalocean.com
+ MANAGED_IMAGE_NAME: manifold/manifold
+
+jobs:
+ build-api:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ packages: write
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Login GHCR
+        uses: docker/login-action@v3
+ with:
+ registry: ${{ env.GHCR_REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Login DOCR
+        uses: docker/login-action@v3
+ with:
+ registry: ${{ env.MANAGED_REGISTRY }}
+          username: ${{ secrets.DOCR_TOKEN }}
+ password: ${{ secrets.DOCR_TOKEN }}
+ - name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: |
+ ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_IMAGE_NAME }}-api
+ ${{ env.MANAGED_REGISTRY }}/${{ env.MANAGED_IMAGE_NAME }}-api
+ tags: |
+ type=raw,value=latest,enable=${{ github.ref_name == 'release' }}
+ type=raw,value=edge,enable={{ is_default_branch }}
+ type=ref,event=branch
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ - name: Build and push Docker
+ id: push
+ uses: docker/build-push-action@v5
+ with:
+ context: ./api
+ target: production
+ push: true
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ build-args:
+ "RAILS_ENV=production"
+
+ build-client:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ packages: write
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Login GHCR
+        uses: docker/login-action@v3
+ with:
+ registry: ${{ env.GHCR_REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Login DOCR
+        uses: docker/login-action@v3
+ with:
+ registry: ${{ env.MANAGED_REGISTRY }}
+          username: ${{ secrets.DOCR_TOKEN }}
+ password: ${{ secrets.DOCR_TOKEN }}
+ - name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: |
+ ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_IMAGE_NAME }}-client
+ ${{ env.MANAGED_REGISTRY }}/${{ env.MANAGED_IMAGE_NAME }}-client
+ tags: |
+ type=raw,value=latest,enable=${{ github.ref_name == 'release' }}
+ type=raw,value=edge,enable={{ is_default_branch }}
+ type=ref,event=branch
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ - name: Build and push Docker
+ id: push
+ uses: docker/build-push-action@v5
+ with:
+ context: ./client
+ target: production
+ push: true
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ build-args:
+ "RAILS_ENV=production"
\ No newline at end of file
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000000..02d3faf295
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,22 @@
+name: Production Release
+
+on:
+ workflow_dispatch:
+
+jobs:
+ build:
+ name: Update Release Branch
+ runs-on: ubuntu-latest
+ steps:
+ - name: "checkout release-candidate"
+ uses: actions/checkout@v4
+ with:
+ ref: release
+
+ - name: "Push release changes"
+ env:
+ GH_TOKEN: ${{ github.token }}
+ run: |
+ git fetch
+ git reset --hard origin/master
+ git push --force origin release
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 668baf3934..a0cd7a8420 100644
--- a/.gitignore
+++ b/.gitignore
@@ -64,3 +64,5 @@ backup-local*.tar
/docker/minio/client/*
!/docker/minio/client/.keep
!/docker/minio/client/initialize.sh
+
+CLAUDE.md
\ No newline at end of file
diff --git a/api/.gitignore b/api/.gitignore
index 4930d183c8..906d344d27 100644
--- a/api/.gitignore
+++ b/api/.gitignore
@@ -40,3 +40,5 @@ backup*
/config/*.local.yml
/config/credentials/local.*
+
+.env
\ No newline at end of file
diff --git a/api/Dockerfile b/api/Dockerfile
new file mode 100644
index 0000000000..d8d7b01693
--- /dev/null
+++ b/api/Dockerfile
@@ -0,0 +1,69 @@
+FROM ruby:3.2-bullseye AS base
+
+RUN mkdir -pv /bundle/bin
+
+ENV BUNDLE_PATH=/bundle \
+ BUNDLE_BIN=/bundle/bin \
+ GEM_HOME=/bundle \
+ RACK_ENV=development \
+ RAILS_ENV=development \
+ RAILS_LOG_TO_STDOUT=true \
+ RAILS_SERVE_STATIC_FILES=true \
+ RAILS_MIN_THREADS=16 \
+ RAILS_MAX_THREADS=16 \
+ WORKER_COUNT=0
+
+COPY docker/install_node_16.sh /usr/local/src/install_node_16.sh
+
+RUN bash /usr/local/src/install_node_16.sh
+
+RUN apt-get update -qq && apt-get install -y -qq --no-install-recommends \
+ build-essential \
+ ca-certificates \
+ curl \
+ ghostscript \
+ gnupg gnupg2 \
+ imagemagick \
+ libglib2.0-0 libglib2.0-dev \
+ libicu-dev \
+ libjemalloc2 \
+ libpoppler-glib8 \
+ librsvg2-bin \
+ libsndfile1-dev \
+ libvips \
+ libvips-dev \
+ mediainfo \
+ nodejs \
+ postgresql-common \
+ pandoc
+
+RUN DEBIAN_FRONTEND=noninteractive /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y
+
+RUN apt-get update -qq && apt-get install -y -qq --no-install-recommends postgresql-client-13
+
+RUN npm install -g mammoth@^1.4.16 wscat@^6.0.1
+
+RUN sed -i '/pattern="PDF"/d' /etc/ImageMagick-6/policy.xml
+
+COPY ./ /srv/app/
+
+WORKDIR /srv/app
+COPY Gemfile /srv/app/Gemfile
+COPY Gemfile.lock /srv/app/Gemfile.lock
+
+COPY docker/entrypoint.sh /usr/bin/
+RUN chmod +x /usr/bin/entrypoint.sh
+ENTRYPOINT ["entrypoint.sh"]
+
+ENV MAMMOTH_PATH=/usr/bin/mammoth
+ENV PATH="${BUNDLE_BIN}:${PATH}"
+ENV LD_PRELOAD=libjemalloc.so.2
+
+RUN bundle install
+
+CMD ["bin/puma", "-C", "config/puma.rb"]
+
+# There are currently no differences between dev and prod Dockerfiles, but these are here to provide parity with the client Dockerfile
+FROM base AS development
+
+FROM base AS production
\ No newline at end of file
diff --git a/api/Gemfile b/api/Gemfile
index 14c638537c..87726f4876 100644
--- a/api/Gemfile
+++ b/api/Gemfile
@@ -6,8 +6,6 @@ gem "aasm", "~> 4.0"
gem "absolute_time", "~> 1.0.0"
gem "active_interaction", "~> 4.0"
gem "activejob-retry", "~> 0.4"
-gem "activejob-traffic_control", "~>0.1.0"
-gem "activejob-uniqueness", "~> 0.2.4"
gem "activerecord-import", "~> 1.4.1"
gem "active_record_upsert", "~> 0.11.1"
gem "acts_as_list", "~> 1.2.4"
@@ -29,6 +27,7 @@ gem "cleanroom"
gem "closure_tree", "~> 7.0"
# Until Rails 7.1
gem "composite_primary_keys", "~> 14.0.10"
+gem "connection_pool"
gem "crass", "~> 1.0.5"
gem "csl-styles", "~> 1.0"
gem "cssbeautify"
@@ -55,6 +54,7 @@ gem "fast_jsonapi", git: "https://github.com/ManifoldScholar/fast_jsonapi.git",
gem "filesize", "~> 0.1"
gem "format_engine", "~> 0.7.7"
gem "friendly_id", "~> 5.0"
+gem "solid_cache", "~> 0.7"
gem "fuzzy_match", "~> 2.1.0"
gem "geocoder", "1.8.2"
gem "gepub", "~> 1.0.4"
@@ -64,7 +64,7 @@ gem "hashie", "~> 3.0"
gem "has_scope", "~> 0.8.1"
gem "httparty", "~> 0.21.0"
gem "image_processing", "~> 1.12"
-gem "job-iteration", "~> 1.10.0"
+gem "job-iteration", "~> 1.11.0"
gem "jsonb_accessor", "~> 1.0"
gem "jwt", "~> 1.5"
gem "kaminari", "~> 1.2"
@@ -80,7 +80,7 @@ gem "namae", "~>1.0"
gem "naught", "~> 1.0"
gem "net-sftp", "~> 2.1.2"
gem "net-ssh", "~> 5.2.0"
-gem "nokogiri", "~> 1.15.4"
+gem "nokogiri", "~> 1.19.1"
gem "oauth2", "~> 1.3"
gem "oj", "~> 3.16"
gem "omniauth", "~> 2.1.3"
@@ -104,9 +104,6 @@ gem "rack-cors", "~> 1.0"
gem "rails", "~> 7.0"
gem "rainbow", "~> 3.0"
gem "redcarpet", "~> 3.5"
-gem "redis", "~> 4.5"
-gem "redis-namespace", "~> 1.0"
-gem "redis-objects", "~> 1.4"
gem "rolify", "~> 5.1"
gem "rubyzip", "~> 2.3.1"
gem "scanf", "~> 1.0.0"
@@ -114,7 +111,6 @@ gem "scenic", "~> 1.4"
gem "shrine", "~> 3.5.0"
gem "shrine-google_cloud_storage", "~> 3.3"
gem "shrine-tus", "~> 2.0"
-gem "sidekiq", "< 6"
gem "signet", "~> 0.10"
gem "sinatra", "~>2.2"
gem "sixarm_ruby_unaccent", "~> 1.2.2"
@@ -134,7 +130,6 @@ gem "validates_email_format_of", "~> 1.0"
gem "validate_url", "~> 1.0"
gem "with_advisory_lock", "~> 4.0"
gem "zaru", "~> 1.0.0"
-gem "zhong", "~> 0.3"
group :development, :test do
gem "pry-byebug"
@@ -170,7 +165,8 @@ end
group :test do
gem "database_cleaner-active_record", "~> 2.1.0"
- gem "database_cleaner-redis", "~> 2.0"
gem "test-prof", "~> 1.0"
gem "with_model", "~> 2.1"
end
+
+gem "good_job", "~> 3.99"
diff --git a/api/Gemfile.lock b/api/Gemfile.lock
index 0d99aeac87..86dd4cd424 100644
--- a/api/Gemfile.lock
+++ b/api/Gemfile.lock
@@ -77,13 +77,6 @@ GEM
activejob-retry (0.6.3)
activejob (>= 4.2)
activesupport (>= 4.2)
- activejob-traffic_control (0.1.3)
- activejob (>= 4.2)
- activesupport (>= 4.2)
- suo
- activejob-uniqueness (0.2.5)
- activejob (>= 4.2, < 7.1)
- redlock (>= 1.2, < 2)
activemodel (7.0.8.7)
activesupport (= 7.0.8.7)
activemodel-serializers-xml (1.0.3)
@@ -192,9 +185,6 @@ GEM
activerecord (>= 5.a)
database_cleaner-core (~> 2.0.0)
database_cleaner-core (2.0.1)
- database_cleaner-redis (2.0.0)
- database_cleaner-core (~> 2.0.0)
- redis
date (3.3.3)
declarative (0.0.20)
device_detector (1.0.7)
@@ -295,6 +285,8 @@ GEM
equalizer (0.0.11)
errbase (0.2.2)
erubi (1.12.0)
+ et-orbi (1.2.11)
+ tzinfo
factory_bot (6.2.1)
activesupport (>= 5.0.0)
factory_bot_rails (6.2.0)
@@ -333,6 +325,9 @@ GEM
format_engine (0.7.10)
friendly_id (5.5.0)
activerecord (>= 4.0.0)
+ fugit (1.11.1)
+ et-orbi (~> 1, >= 1.2.11)
+ raabro (~> 1.4)
fuzzy_match (2.1.0)
gems (1.2.0)
geocoder (1.8.2)
@@ -341,6 +336,13 @@ GEM
rubyzip (> 1.1.1, < 2.4)
globalid (1.2.1)
activesupport (>= 6.1)
+ good_job (3.99.1)
+ activejob (>= 6.0.0)
+ activerecord (>= 6.0.0)
+ concurrent-ruby (>= 1.0.2)
+ fugit (>= 1.1)
+ railties (>= 6.0.0)
+ thor (>= 0.14.1)
google-api-client (0.53.0)
google-apis-core (~> 0.1)
google-apis-generator (~> 0.1)
@@ -423,7 +425,7 @@ GEM
mini_magick (>= 4.9.5, < 5)
ruby-vips (>= 2.0.17, < 3)
jmespath (1.6.2)
- job-iteration (1.10.0)
+ job-iteration (1.11.0)
activejob (>= 6.1)
json (2.10.2)
json-schema (5.1.1)
@@ -479,11 +481,10 @@ GEM
mime-types-data (3.2023.0808)
mini_magick (4.12.0)
mini_mime (1.1.5)
- mini_portile2 (2.8.4)
+ mini_portile2 (2.8.9)
minitest (5.20.0)
money (6.16.0)
i18n (>= 0.6.4, <= 2)
- msgpack (1.7.2)
multi_json (1.15.0)
multi_xml (0.6.0)
multipart-post (2.3.0)
@@ -504,7 +505,7 @@ GEM
net-protocol
net-ssh (5.2.0)
nio4r (2.5.9)
- nokogiri (1.15.4)
+ nokogiri (1.19.1)
mini_portile2 (~> 2.8.2)
racc (~> 1.4)
oauth (1.1.0)
@@ -581,6 +582,7 @@ GEM
public_suffix (5.0.3)
puma (6.4.0)
nio4r (~> 2.0)
+ raabro (1.4.0)
racc (1.7.1)
rack (2.2.13)
rack-attack (6.7.0)
@@ -626,13 +628,6 @@ GEM
ffi (~> 1.0)
rbtree (0.4.6)
redcarpet (3.6.0)
- redis (4.5.1)
- redis-namespace (1.11.0)
- redis (>= 4)
- redis-objects (1.7.0)
- redis
- redlock (1.3.2)
- redis (>= 3.0.0, < 6.0)
regexp_parser (2.10.0)
representable (3.2.0)
declarative (< 0.1.0)
@@ -743,11 +738,6 @@ GEM
down (~> 5.0)
http (>= 3.2, < 6)
shrine (>= 3.0.0.rc, < 4)
- sidekiq (5.2.10)
- connection_pool (~> 2.2, >= 2.2.2)
- rack (~> 2.0)
- rack-protection (>= 1.5.0)
- redis (~> 4.5, < 4.6.0)
signet (0.18.0)
addressable (~> 2.8)
faraday (>= 0.17.5, < 3.a)
@@ -769,6 +759,10 @@ GEM
snaky_hash (2.0.1)
hashie
version_gem (~> 1.1, >= 1.1.1)
+ solid_cache (0.7.0)
+ activejob (>= 7)
+ activerecord (>= 7)
+ railties (>= 7)
sorted_set (1.0.3)
rbtree
set (~> 1.0)
@@ -785,10 +779,6 @@ GEM
activerecord (>= 5.2)
strip_attributes (1.13.0)
activemodel (>= 3.0, < 8.0)
- suo (0.4.0)
- dalli
- msgpack
- redis
systemu (2.6.5)
terminal-table (3.0.2)
unicode-display_width (>= 1.1.1, < 3)
@@ -848,11 +838,6 @@ GEM
yard_types (0.2.0)
zaru (1.0.0)
zeitwerk (2.6.12)
- zhong (0.3.0)
- activesupport
- redis
- suo
- tzinfo
PLATFORMS
ruby
@@ -863,8 +848,6 @@ DEPENDENCIES
active_interaction (~> 4.0)
active_record_upsert (~> 0.11.1)
activejob-retry (~> 0.4)
- activejob-traffic_control (~> 0.1.0)
- activejob-uniqueness (~> 0.2.4)
activerecord-import (~> 1.4.1)
acts-as-taggable-on (~> 9.0)
acts_as_list (~> 1.2.4)
@@ -883,6 +866,7 @@ DEPENDENCIES
cleanroom
closure_tree (~> 7.0)
composite_primary_keys (~> 14.0.10)
+ connection_pool
crass (~> 1.0.5)
csl-styles (~> 1.0)
css_parser (~> 1.0)
@@ -890,7 +874,6 @@ DEPENDENCIES
dalli (= 2.7.11)
data_uri_revived
database_cleaner-active_record (~> 2.1.0)
- database_cleaner-redis (~> 2.0)
dotenv-rails (~> 2.0)
draper (~> 4.0.3)
dry-core (~> 1.0)
@@ -915,13 +898,14 @@ DEPENDENCIES
fuzzy_match (~> 2.1.0)
geocoder (= 1.8.2)
gepub (~> 1.0.4)
+ good_job (~> 3.99)
google-api-client (~> 0.53.0)
google_drive (~> 3.0)
has_scope (~> 0.8.1)
hashie (~> 3.0)
httparty (~> 0.21.0)
image_processing (~> 1.12)
- job-iteration (~> 1.10.0)
+ job-iteration (~> 1.11.0)
jsonb_accessor (~> 1.0)
jwt (~> 1.5)
kaminari (~> 1.2)
@@ -938,7 +922,7 @@ DEPENDENCIES
naught (~> 1.0)
net-sftp (~> 2.1.2)
net-ssh (~> 5.2.0)
- nokogiri (~> 1.15.4)
+ nokogiri (~> 1.19.1)
oauth2 (~> 1.3)
oj (~> 3.16)
omniauth (~> 2.1.3)
@@ -964,9 +948,6 @@ DEPENDENCIES
rails (~> 7.0)
rainbow (~> 3.0)
redcarpet (~> 3.5)
- redis (~> 4.5)
- redis-namespace (~> 1.0)
- redis-objects (~> 1.4)
rolify (~> 5.1)
rspec-collection_matchers (~> 1.2.0)
rspec-core (~> 3.12)
@@ -990,11 +971,11 @@ DEPENDENCIES
shrine (~> 3.5.0)
shrine-google_cloud_storage (~> 3.3)
shrine-tus (~> 2.0)
- sidekiq (< 6)
signet (~> 0.10)
simplecov
sinatra (~> 2.2)
sixarm_ruby_unaccent (~> 1.2.2)
+ solid_cache (~> 0.7)
sorted_set (~> 1.0, >= 1.0.3)
spring
spring-commands-rspec (~> 1.0.4)
@@ -1018,7 +999,6 @@ DEPENDENCIES
with_advisory_lock (~> 4.0)
with_model (~> 2.1)
zaru (~> 1.0.0)
- zhong (~> 0.3)
BUNDLED WITH
2.4.19
diff --git a/api/Procfile b/api/Procfile
index fd66461212..183b15aedc 100644
--- a/api/Procfile
+++ b/api/Procfile
@@ -1,3 +1,2 @@
 api: ./bin/puma
-worker: ./bin/sidekiq_dev
-zhong: ./bin/zhong zhong.rb
+worker: ./bin/good_job start --probe-port 7001
diff --git a/api/app/controllers/api/proxy/ingestion_sources_controller.rb b/api/app/controllers/api/proxy/ingestion_sources_controller.rb
index 696bdecd11..6170c367b6 100644
--- a/api/app/controllers/api/proxy/ingestion_sources_controller.rb
+++ b/api/app/controllers/api/proxy/ingestion_sources_controller.rb
@@ -10,7 +10,7 @@ def show
if source.attachment.storage.respond_to? :path
send_attachment(source)
else
- redirect_to source.attachment.url
+ redirect_to source.attachment.url, allow_other_host: true
end
end
diff --git a/api/app/controllers/application_controller.rb b/api/app/controllers/application_controller.rb
index 602aae8c5b..1da4b48649 100644
--- a/api/app/controllers/application_controller.rb
+++ b/api/app/controllers/application_controller.rb
@@ -167,6 +167,7 @@ def authority_forbidden(error)
def render_error_response(error)
# :nocov:
raise error if Rails.env.test?
+ Rails.logger.error(["#{error.class.name} - #{error.message}", *error.backtrace].join("\n"))
options = {
status: 500,
diff --git a/api/app/controllers/health_controller.rb b/api/app/controllers/health_controller.rb
new file mode 100644
index 0000000000..60988f2334
--- /dev/null
+++ b/api/app/controllers/health_controller.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+class HealthController < ApplicationController
+ def show
+ render json: { ok: true }
+ end
+end
diff --git a/api/app/jobs/annotation_jobs/adopt_or_orphan_job.rb b/api/app/jobs/annotation_jobs/adopt_or_orphan_job.rb
index ea5f55ff33..cd768f4f3c 100644
--- a/api/app/jobs/annotation_jobs/adopt_or_orphan_job.rb
+++ b/api/app/jobs/annotation_jobs/adopt_or_orphan_job.rb
@@ -5,7 +5,7 @@ module AnnotationJobs
class AdoptOrOrphanJob < ApplicationJob
queue_as :annotations
- unique :until_executed, lock_ttl: 15.minutes, on_conflict: :log
+ unique_job! by: :job
# @param [Annotation] annotation
# @return [void]
diff --git a/api/app/jobs/application_job.rb b/api/app/jobs/application_job.rb
index 00159feeec..6af7ff4025 100644
--- a/api/app/jobs/application_job.rb
+++ b/api/app/jobs/application_job.rb
@@ -1,6 +1,7 @@
# frozen_string_literal: true
class ApplicationJob < ActiveJob::Base
+ include JobConcurrency
def match_result(result, &)
Dry::Matcher::ResultMatcher.(result, &)
end
diff --git a/api/app/jobs/attachments/process_attachment_job.rb b/api/app/jobs/attachments/process_attachment_job.rb
index e9fe6f83ed..52d960caad 100644
--- a/api/app/jobs/attachments/process_attachment_job.rb
+++ b/api/app/jobs/attachments/process_attachment_job.rb
@@ -3,8 +3,12 @@
module Attachments
class ProcessAttachmentJob < ApplicationJob
include ExclusiveJob
+ include GoodJob::ActiveJobExtensions::Concurrency
- concurrency 1, drop: false unless Rails.env.test?
+ good_job_control_concurrency_with(
+ perform_limit: 1,
+ key: -> { "ProcessAttachmentJob:#{arguments.slice(1, 2).flatten.join(':')}" }
+ )
discard_on ActiveJob::DeserializationError, ActiveRecord::RecordNotFound
diff --git a/api/app/jobs/formatted_attributes/purge_legacy_caches_job.rb b/api/app/jobs/formatted_attributes/purge_legacy_caches_job.rb
deleted file mode 100644
index ea7accdb7f..0000000000
--- a/api/app/jobs/formatted_attributes/purge_legacy_caches_job.rb
+++ /dev/null
@@ -1,29 +0,0 @@
-# frozen_string_literal: true
-
-module FormattedAttributes
- class PurgeLegacyCachesJob < ApplicationJob
- queue_as :default
-
- PATTERNS = %w[
- */plaintext/*
- */formatted/*
- ].freeze
-
- # @return [void]
- def perform
- redis = Redis.new
-
- PATTERNS.each do |pattern|
- cursor = 0
-
- loop do
- cursor, keys = redis.scan cursor, match: pattern
-
- redis.del *keys if keys.any?
-
- break if cursor == "0"
- end
- end
- end
- end
-end
diff --git a/api/app/jobs/packaging/exportation/export_text_to_epub_v3_job.rb b/api/app/jobs/packaging/exportation/export_text_to_epub_v3_job.rb
index fcfb72cf56..ef09aa8ccb 100644
--- a/api/app/jobs/packaging/exportation/export_text_to_epub_v3_job.rb
+++ b/api/app/jobs/packaging/exportation/export_text_to_epub_v3_job.rb
@@ -7,7 +7,7 @@ class ExportTextToEpubV3Job < ApplicationJob
around_perform :advisory_locked!
- unique :until_executed, lock_ttl: 15.minutes, on_conflict: :log
+ unique_job! by: :job
queue_as :default
diff --git a/api/app/jobs/resource_import_rows/import_job.rb b/api/app/jobs/resource_import_rows/import_job.rb
index 049b966540..3d026960de 100644
--- a/api/app/jobs/resource_import_rows/import_job.rb
+++ b/api/app/jobs/resource_import_rows/import_job.rb
@@ -3,11 +3,12 @@
# Simple job to process a resource import row
module ResourceImportRows
class ImportJob < ApplicationJob
- # Our acceptance tests use perform_now, which break if this is throttled.
- unless Rails.env.test?
- # concurrency 6, drop: false
- throttle threshold: 3, period: 0.5.seconds, drop: false
- end
+ include GoodJob::ActiveJobExtensions::Concurrency
+
+ good_job_control_concurrency_with(
+ perform_limit: 3,
+ key: -> { "ResourceImportRows::ImportJob" }
+ )
queue_as :low_priority
diff --git a/api/app/jobs/text_section_nodes/backport_search_index_job.rb b/api/app/jobs/text_section_nodes/backport_search_index_job.rb
index c5675a999c..55575149ea 100644
--- a/api/app/jobs/text_section_nodes/backport_search_index_job.rb
+++ b/api/app/jobs/text_section_nodes/backport_search_index_job.rb
@@ -6,7 +6,7 @@ class BackportSearchIndexJob < ApplicationJob
queue_as :low_priority
- unique :until_executed, lock_ttl: 2.days, on_conflict: :log
+ unique_job! by: :job
def build_enumerator(cursor:)
enumerator_builder.active_record_on_records(
diff --git a/api/app/lib/statistics.rb b/api/app/lib/statistics.rb
index 636f3e7511..fbdf241f21 100644
--- a/api/app/lib/statistics.rb
+++ b/api/app/lib/statistics.rb
@@ -9,13 +9,10 @@ class Statistics
include ActiveModel::Conversion
include Authority::Abilities
include SerializedAbilitiesFor
- include Redis::Objects
DOWNLOAD_EVENT_NAMES = %w(download_project download_journal).freeze
-
- lock :transaction, timeout: 1, expiration: 15
- value :this_week, marshal: true
- value :last_week, marshal: true
+ THIS_WEEK_KEY = "statistics:this_week"
+ LAST_WEEK_KEY = "statistics:last_week"
def id
0
@@ -24,30 +21,30 @@ def id
# @!attribute [rw] readers_this_week
# @return [Float]
def readers_this_week
- this_week.value.to_f
+ Rails.cache.read(THIS_WEEK_KEY).to_f
end
def readers_this_week=(new_value)
- this_week.value = new_value.to_f
+ Rails.cache.write(THIS_WEEK_KEY, new_value.to_f)
end
# @!attribute [rw] readers_last_week
# @return [Float]
def readers_last_week
- last_week.value.to_f
+ Rails.cache.read(LAST_WEEK_KEY).to_f
end
def readers_last_week=(new_value)
- last_week.value = new_value
+ Rails.cache.write(LAST_WEEK_KEY, new_value.to_f)
end
- # Update values in a redis lock.
+ # Update values within an advisory lock.
#
# @yieldparam [Statistics] instance the instance itself to update
# @yieldreturn [void]
# @return [void]
def update
- transaction_lock.lock do
+ ApplicationRecord.with_advisory_lock("statistics:update", timeout_seconds: 15) do
yield self if block_given?
end
end
diff --git a/api/app/services/concerns/job_concurrency.rb b/api/app/services/concerns/job_concurrency.rb
new file mode 100644
index 0000000000..535cef1fda
--- /dev/null
+++ b/api/app/services/concerns/job_concurrency.rb
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module JobConcurrency
+ extend ActiveSupport::Concern
+
+ included do
+ include GoodJob::ActiveJobExtensions::Concurrency
+ end
+
+ module ClassMethods
+ # @param [:model, :job, Proc] by
+ # @return [void]
+ def unique_job!(by: :job)
+ key = unique_job_key_for(by:)
+
+ good_job_control_concurrency_with(
+ total_limit: 1,
+ key:,
+ )
+ end
+
+ # @param [:model, :job, Proc] by
+ # @return [Proc]
+ def unique_job_key_for(by: nil)
+ return by if by.kind_of?(Proc)
+
+ case by
+ in :model
+ -> { "#{self.class.name}-#{arguments.first.to_global_id}" }
+ in :job
+ -> { self.class.name.to_s }
+ end
+ end
+ end
+end
diff --git a/api/app/services/storage/shrine_properties.rb b/api/app/services/storage/shrine_properties.rb
index 6da588b0e1..9ec6b99b6b 100644
--- a/api/app/services/storage/shrine_properties.rb
+++ b/api/app/services/storage/shrine_properties.rb
@@ -23,16 +23,18 @@ def shrine_columns_for(model)
def models
tables.map do |table|
- table.classify.constantize
- rescue StandardError
- nil
- end.compact + plural_models
+ table.classify.safe_constantize
+ end.compact + plural_models - excluded_models
end
def plural_models
[Settings]
end
+ def excluded_models
+ [GoodJob]
+ end
+
def tables
ActiveRecord::Base.connection.tables - %w(schema_migrations comment_hierarchies comments) + %w(settings)
end
diff --git a/api/app/uploaders/concerns/shared_uploader.rb b/api/app/uploaders/concerns/shared_uploader.rb
index 2a5e7589f4..0fe33eac33 100644
--- a/api/app/uploaders/concerns/shared_uploader.rb
+++ b/api/app/uploaders/concerns/shared_uploader.rb
@@ -22,12 +22,19 @@ module SharedUploader
included do
plugin :add_metadata
- plugin :url_options, cache: URL_OPTIONS, store: URL_OPTIONS
+ plugin :url_options, cache: Storage::Factory.url_options, store: Storage::Factory.url_options
+
+ upload_options = { cache: { acl: "public-read" }, store: { acl: "public-read" } }
# In our specs we often attach files from fixtures. If we let Shrine move them, they're
# not available for other specs. Our import services also attach files from the file system,
# which should remain in place after the import runs. Until we sort out these issues, we
# should copy rather than move files into the cache.
- plugin :upload_options, cache: { move: false }, store: { move: true } if Storage::Factory.store_supports_move?
+ if Storage::Factory.store_supports_move?
+ upload_options[:cache][:move] = false
+ upload_options[:store][:move] = true
+ end
+ plugin :upload_options, **upload_options
+
plugin :pretty_location
add_metadata :sha256 do |io, _context|
diff --git a/api/bin/ensure-db b/api/bin/ensure-db
index 2e3ab6d040..ec3832e3e3 100755
--- a/api/bin/ensure-db
+++ b/api/bin/ensure-db
@@ -3,5 +3,6 @@
set -eux
bin/rails db:create
-bin/rails db:migrate
+bin/rails db:migrate:primary
+bin/rails db:migrate:cache
bin/rails db:seed
diff --git a/api/bin/zhong b/api/bin/good_job
similarity index 60%
rename from api/bin/zhong
rename to api/bin/good_job
index 1160b6bc15..1ee66a5773 100755
--- a/api/bin/zhong
+++ b/api/bin/good_job
@@ -4,18 +4,16 @@
#
# This file was generated by Bundler.
#
-# The application 'zhong' is installed as part of a gem, and
+# The application 'good_job' is installed as part of a gem, and
# this file is here to facilitate running it.
#
-require "pathname"
-ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../../Gemfile",
- Pathname.new(__FILE__).realpath)
+ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__)
bundle_binstub = File.expand_path("bundle", __dir__)
if File.file?(bundle_binstub)
- if File.read(bundle_binstub, 300) =~ /This file was generated by Bundler/
+ if File.read(bundle_binstub, 300).include?("This file was generated by Bundler")
load(bundle_binstub)
else
abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run.
@@ -26,4 +24,4 @@ end
require "rubygems"
require "bundler/setup"
-load Gem.bin_path("zhong", "zhong")
+load Gem.bin_path("good_job", "good_job")
diff --git a/api/bin/sidekiq b/api/bin/sidekiq
deleted file mode 100755
index 83aab28f8c..0000000000
--- a/api/bin/sidekiq
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env ruby
-# frozen_string_literal: true
-#
-# This file was generated by Bundler.
-#
-# The application 'sidekiq' is installed as part of a gem, and
-# this file is here to facilitate running it.
-#
-
-require "pathname"
-ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../../Gemfile",
- Pathname.new(__FILE__).realpath)
-
-require "rubygems"
-require "bundler/setup"
-
-load Gem.bin_path("sidekiq", "sidekiq")
diff --git a/api/bin/sidekiq_dev b/api/bin/sidekiq_dev
deleted file mode 100755
index 660dc48b5b..0000000000
--- a/api/bin/sidekiq_dev
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-. ../.env
-if [ "$RAILS_ENV" == "development" ]; then
- exec ./bin/sidekiq
-fi
diff --git a/api/config/application.rb b/api/config/application.rb
index fed5b185ee..0af697c780 100644
--- a/api/config/application.rb
+++ b/api/config/application.rb
@@ -28,6 +28,7 @@ module Dotenv
class Railtie < Rails::Railtie
def load
Dotenv.load(
+ root.join("./.env"),
root.join("../.env.local"),
root.join("../.env.#{Rails.env}"),
root.join("../.env")
@@ -83,6 +84,12 @@ class Application < Rails::Application
# Skip views, helpers and assets when generating a new resource.
config.api_only = true
+    config.middleware.use ActionDispatch::RemoteIp
+    config.middleware.use Rack::MethodOverride
+    config.middleware.use ActionDispatch::Cookies
+    config.middleware.use ActionDispatch::Session::CookieStore
+    config.middleware.use ActionDispatch::Flash
+
config.eager_load_paths += [
"#{config.root}/app/jobs",
"#{config.root}/app/models",
@@ -104,10 +111,10 @@ class Application < Rails::Application
g.orm :active_record, primary_key_type: :uuid
end
- config.active_job.queue_adapter = :sidekiq
+ config.active_job.queue_adapter = :good_job
config.active_record.schema_format = :sql
- config.cache_store = :redis_cache_store, ManifoldEnv.redis.cache_options
+ config.cache_store = :solid_cache_store
end
end
diff --git a/api/config/database.yml b/api/config/database.yml
index 75b3e46915..856870d717 100644
--- a/api/config/database.yml
+++ b/api/config/database.yml
@@ -1,5 +1,4 @@
<%
-
user = ENV["RAILS_DB_USER"]
pass = ENV["RAILS_DB_PASS"]
host = ENV["RAILS_DB_HOST"] || 'localhost'
@@ -7,17 +6,35 @@
db = ENV["RAILS_DB_NAME"] || 'manifold_development'
test_db = ENV["RAILS_TEST_DB_NAME"] || 'manifold_test'
+ cache_user = ENV["RAILS_CACHE_DB_USER"]
+ cache_pass = ENV["RAILS_CACHE_DB_PASS"]
+ cache_host = ENV["RAILS_CACHE_DB_HOST"] || 'localhost'
+ cache_port = ENV["RAILS_CACHE_DB_PORT"] || 5432
+ cache_db = ENV["RAILS_CACHE_DB_NAME"] || 'manifold_cache_development'
+ cache_test_db = ENV["RAILS_CACHE_TEST_DB_NAME"] || 'manifold_cache_test'
%>
common: &common
- adapter: postgresql
- host: <%= host %>
- encoding: unicode
- pool: 50
- port: <%= port %>
- user: <%= user %>
- database: <%= db %>
- password: "<%= pass %>"
+ primary: &primary
+ adapter: postgresql
+ host: <%= host %>
+ encoding: unicode
+ pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 50 } %>
+ port: <%= port %>
+ user: <%= user %>
+ database: <%= db %>
+ password: "<%= pass %>"
+ cache: &cache
+ adapter: postgresql
+ host: <%= cache_host %>
+ encoding: unicode
+ pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 50 } %>
+ port: <%= cache_port %>
+ user: <%= cache_user %>
+ database: <%= cache_db %>
+ password: "<%= cache_pass %>"
+ migrations_paths: db/cache_migrate
+
production:
<<: *common
@@ -32,5 +49,11 @@ development:
<<: *common
test:
- <<: *common
- database: <%= test_db %><%= ENV["TEST_ENV_NUMBER"] %>
+ primary:
+ <<: *primary
+ database: <%= test_db %><%= ENV["TEST_ENV_NUMBER"] %>
+ cache:
+ <<: *cache
+ database: <%= cache_test_db %><%= ENV["TEST_ENV_NUMBER"] %>
+
+
diff --git a/api/config/environments/development.rb b/api/config/environments/development.rb
index a8d3d99ac8..c5bcada7d1 100644
--- a/api/config/environments/development.rb
+++ b/api/config/environments/development.rb
@@ -32,6 +32,7 @@
config.hosts << "www.example.com"
config.hosts << "localhost"
config.hosts << "manifold.lvh"
+ config.hosts << "web.manifold.orb.local"
config.hosts << "manifold-api.ngrok.io"
config.hosts << "manifold-dev.ngrok.io"
config.hosts << ENV["DOMAIN"]
diff --git a/api/config/initializers/10_redis.rb b/api/config/initializers/10_redis.rb
deleted file mode 100644
index 4009b9c27d..0000000000
--- a/api/config/initializers/10_redis.rb
+++ /dev/null
@@ -1,5 +0,0 @@
-# frozen_string_literal: true
-
-# TODO: Revisit in v7, remove redis-namespace.
-ENV["REDIS_NAMESPACE_QUIET"] = "true" # Disable deprecation warning
-Redis::Objects.redis = ManifoldEnv.redis.build_connection_pool
diff --git a/api/config/initializers/25_lockbox.rb b/api/config/initializers/25_lockbox.rb
index 2e409c1ce0..d27953484d 100644
--- a/api/config/initializers/25_lockbox.rb
+++ b/api/config/initializers/25_lockbox.rb
@@ -1,10 +1,14 @@
# frozen_string_literal: true
-secret_key_base = Rails.application.secrets.secret_key_base
-
-secret_key_base = secret_key_base.presence || Lockbox.generate_key if Rails.env.test?
+lockbox_master_key = if Rails.env.test?
+ Lockbox.generate_key
+ elsif ENV["LOCKBOX_MASTER_KEY"].present?
+ ENV["LOCKBOX_MASTER_KEY"]
+ else
+ Rails.application.secret_key_base
+ end
# Consistently ensure that it's a 64-character hexadecimal key
-enforced_master_key = secret_key_base.gsub(/[^a-zA-Z0-9]+/, "")[/\A([a-zA-Z0-9]{1,64})/, 1].rjust(64, "0")
+enforced_master_key = lockbox_master_key.gsub(/[^a-zA-Z0-9]+/, "")[/\A([a-zA-Z0-9]{1,64})/, 1].rjust(64, "0")
Lockbox.master_key = enforced_master_key
diff --git a/api/config/initializers/40_traffic_control.rb b/api/config/initializers/40_traffic_control.rb
deleted file mode 100644
index 350acf18eb..0000000000
--- a/api/config/initializers/40_traffic_control.rb
+++ /dev/null
@@ -1,20 +0,0 @@
-# frozen_string_literal: true
-
-module Patches
- module MakeTrafficControlSupportRedisNamespace
- def client_class_type(client)
- if client.instance_of?(::Redis::Namespace)
- Suo::Client::Redis
- else
- super
- end
- end
- end
-end
-
-ActiveJob::TrafficControl.singleton_class.prepend(
- Patches::MakeTrafficControlSupportRedisNamespace
-)
-ActiveJob::TrafficControl.client =
- ManifoldEnv.redis.build_connection_pool "traffic-control", size: 25
-ActiveJob::TrafficControl.cache_client = Rails.cache
diff --git a/api/config/initializers/good_job.rb b/api/config/initializers/good_job.rb
new file mode 100644
index 0000000000..bb45d4d741
--- /dev/null
+++ b/api/config/initializers/good_job.rb
@@ -0,0 +1,73 @@
+# frozen_string_literal: true
+
+Rails.application.configure do
+ # Future-proofing
+ config.good_job.smaller_number_is_higher_priority = true
+
+ queues = [
+ "+default,mailers,deletions,low_priority,ahoy,annotations:10",
+ ].join(";")
+
+ config.good_job.cleanup_preserved_jobs_before_seconds_ago = 43_200 # half-day
+ config.good_job.preserve_job_records = true
+ config.good_job.retry_on_unhandled_error = false
+ config.good_job.on_thread_error = ->(exception) { Rollbar.error(exception) }
+ config.good_job.execution_mode = :external
+ config.good_job.queues = queues
+ config.good_job.max_threads = 5
+ config.good_job.poll_interval = 30 # seconds
+ config.good_job.shutdown_timeout = 25 # seconds
+ config.good_job.enable_cron = true
+ config.good_job.cron = {
+ "caches.refresh_project_collections": {
+ cron: "*/15 * * * *",
+ class: "::ProjectCollectionJobs::QueueCacheCollectionProjectsJob"
+ },
+ "caches.refresh_all_flag_status_data": {
+ cron: "*/10 * * * *",
+ class: "::Flags::RefreshAllStatusDataJob"
+ },
+ "entitlements.audit": {
+ cron: "*/15 * * * *",
+ class: "Entitlements::AuditJob"
+ },
+ "entitlements.check_expiration": {
+ cron: "0 * * * *",
+ class: "Entitlements::CheckExpirationJob"
+ },
+ "uploads.expire_shrine_cache": {
+ cron: "0 22 * * *",
+ class: "ExpireShrineCacheJob"
+ },
+ "uploads.expire_tus_uploads": {
+ cron: "0 23 * * *",
+ class: "ExpireTusUploadsJob"
+ },
+ "notifications.enqueue_user_daily_digests": {
+ cron: "0 6 * * *",
+ class: "Notifications::EnqueueDigestsJob"
+ },
+ "notifications.enqueue_user_weekly_digests": {
+ cron: "0 6 * * 0",
+ class: "Notifications::EnqueueDigestsJob"
+ },
+ "packaging.automate_text_exports": {
+ cron: "*/5 * * * *",
+ class: "Texts::AutomateExportsJob"
+ },
+ "packaging.prune_text_exports": {
+ cron: "0 1 * * *",
+ class: "TextExports::PruneJob"
+ },
+ "packaging.prune_project_exports": {
+ cron: "5 1 * * *",
+ class: "ProjectExports::PruneJob"
+ },
+ "packaging.prune_bag_it_temporary_directory": {
+ cron: "0 */4 * * *",
+ class: "Packaging::BagItSpec::PruneTemporaryDirectoryJob"
+ }
+ }
+
+ config.good_job.dashboard_default_locale = :en
+end
diff --git a/api/config/initializers/rack_attack.rb b/api/config/initializers/rack_attack.rb
index 75c82b6af1..f3e2ccac5c 100644
--- a/api/config/initializers/rack_attack.rb
+++ b/api/config/initializers/rack_attack.rb
@@ -1,60 +1,74 @@
# frozen_string_literal: true
-# :nocov:
-# We want to ensure that the public IP used by the client is never
-# accidentally blocklisted or throttled.
-unless Rails.env.development? || Rails.env.test?
- ManifoldEnv.rate_limiting.derive_public_ips! Rails.application.config.manifold.domain
-end
-
-ManifoldEnv.rate_limiting.public_ips.each do |public_ip|
- Rack::Attack.safelist_ip public_ip
-end
-# :nocov:
+# Needs to run after initialization, else Solid Cache won't be ready
+# Revisit after upgrading to Solid Cache 1.0
+ActiveSupport::Reloader.to_prepare do
+ # :nocov:
+ # We want to ensure that the public IP used by the client is never
+ # accidentally blocklisted or throttled.
+ unless Rails.env.development? || Rails.env.test?
+ if ENV["CLIENT_SERVER_IP"].present?
+ Rack::Attack.safelist_ip ENV["CLIENT_SERVER_IP"]
+ else
+ ManifoldEnv.rate_limiting.derive_public_ips! Rails.application.config.manifold.domain
+ end
+ end
-Rack::Attack.safelist("allow all GET requests") do |request|
- # We do not currently throttle GET requests.
- request.get?
-end
+ ManifoldEnv.rate_limiting.public_ips.each do |public_ip|
+ Rack::Attack.safelist_ip public_ip
+ end
+ # :nocov:
-Rack::Attack.safelist("mark any admin access safe") do |request|
- request.env["manifold_env.authorized_admin"]
-end
+ Rack::Attack.safelist("allow all GET requests") do |request|
+ # We do not currently throttle GET requests.
+ request.get?
+ end
-Rack::Attack.safelist("skip when disabled globally or per category") do |request|
- request.env["manifold_env.rate_limiting_disabled"]
-end
+ Rack::Attack.safelist("mark any admin access safe") do |request|
+ request.env["manifold_env.authorized_admin"]
+ end
-ManifoldEnv.rate_limiting.each_throttled_category do |throttler|
- Rack::Attack.throttle throttler.email_key, **throttler.options do |request|
- request.env["manifold_env.real_email"] if request.env["manifold_env.throttled_category"] == throttler.category
+ Rack::Attack.safelist("skip when disabled globally or per category") do |request|
+ request.env["manifold_env.rate_limiting_disabled"]
end
- Rack::Attack.throttle throttler.ip_key, **throttler.options do |request|
- request.ip if request.env["manifold_env.throttled_category"] == throttler.category
+ ManifoldEnv.rate_limiting.each_throttled_category do |throttler|
+ Rack::Attack.throttle throttler.email_key, **throttler.options do |request|
+ request.env["manifold_env.real_email"] if request.env["manifold_env.throttled_category"] == throttler.category
+ end
+
+ Rack::Attack.throttle throttler.ip_key, **throttler.options do |request|
+ next unless request.env["manifold_env.throttled_category"] == throttler.category
+
+ ENV.fetch("PROXY_CLIENT_IP_HEADER", "").split(/,\s*/).map do |header|
+ request.get_header(header)
+ end.push(request.env["action_dispatch.remote_ip"].to_s, request.ip)
+ .compact_blank
+ .first
+ end
end
-end
-ActiveSupport::Notifications.subscribe("blocklist.rack_attack") do |name, start, finish, request_id, payload|
- # :nocov:
- ThrottledRequest.track! payload[:request]
- # :nocov:
-end
+ ActiveSupport::Notifications.subscribe("blocklist.rack_attack") do |name, start, finish, request_id, payload|
+ # :nocov:
+ ThrottledRequest.track! payload[:request]
+ # :nocov:
+ end
-ActiveSupport::Notifications.subscribe("throttle.rack_attack") do |name, start, finish, request_id, payload|
- # :nocov:
- ThrottledRequest.track! payload[:request]
- # :nocov:
-end
+ ActiveSupport::Notifications.subscribe("throttle.rack_attack") do |name, start, finish, request_id, payload|
+ # :nocov:
+ ThrottledRequest.track! payload[:request]
+ # :nocov:
+ end
-Rack::Attack.blocklisted_responder = lambda do |request|
- # :nocov:
- [503, {}, ["Internal Server Error\n"]]
- # :nocov:
-end
+ Rack::Attack.blocklisted_responder = lambda do |request|
+ # :nocov:
+ [429, {}, ["Rate Limit Exceeded\n"]]
+ # :nocov:
+ end
-Rack::Attack.throttled_responder = lambda do |request|
- # :nocov:
- [503, {}, ["Internal Server Error\n"]]
- # :nocov:
+ Rack::Attack.throttled_responder = lambda do |request|
+ # :nocov:
+ [429, {}, ["Rate Limit Exceeded\n"]]
+ # :nocov:
+ end
end
diff --git a/api/config/initializers/sidekiq_and_redis.rb b/api/config/initializers/sidekiq_and_redis.rb
deleted file mode 100644
index e3b77c9096..0000000000
--- a/api/config/initializers/sidekiq_and_redis.rb
+++ /dev/null
@@ -1,9 +0,0 @@
-# frozen_string_literal: true
-
-Sidekiq.configure_server do |config|
- config.redis = ManifoldEnv.redis.sidekiq_options
-end
-
-Sidekiq.configure_client do |config|
- config.redis = ManifoldEnv.redis.sidekiq_options
-end
diff --git a/api/config/initializers/zhong.rb b/api/config/initializers/zhong.rb
deleted file mode 100644
index a4affd86e7..0000000000
--- a/api/config/initializers/zhong.rb
+++ /dev/null
@@ -1,3 +0,0 @@
-# frozen_string_literal: true
-
-require "#{Rails.root}/zhong.rb"
diff --git a/api/config/puma.rb b/api/config/puma.rb
index 7261e89bbd..70fb878c07 100644
--- a/api/config/puma.rb
+++ b/api/config/puma.rb
@@ -38,8 +38,11 @@
is_development ? 16 : 6
end
-pidfile pidfile_path
-state_path state_path
+if listen_on_socket
+ pidfile pidfile_path
+ state_path state_path
+end
+
tag "manifold-#{application}"
environment rails_environment
workers number_of_workers
@@ -60,13 +63,6 @@
ActiveSupport.on_load(:active_record) do
ActiveRecord::Base.connection.disconnect!
end
-
- # Ensure we disconnect from Rails cache on forking.
- Rails.cache.redis.disconnect!
-
- Redis.current.disconnect!
-
- Redis::Objects.redis.disconnect!
end
on_worker_boot do
diff --git a/api/config/routes.rb b/api/config/routes.rb
index b6fcf6e64e..ff769979bd 100644
--- a/api/config/routes.rb
+++ b/api/config/routes.rb
@@ -1,8 +1,5 @@
# frozen_string_literal: true
-require "sidekiq/web"
-require "zhong/web"
-
Rails.application.routes.draw do
concern :flaggable do
resource :flags, controller: "/api/v1/flags", only: [:create, :destroy] do
@@ -19,10 +16,12 @@
end
constraints ->(request) { AuthConstraint.new(request).admin? || Rails.env.development? } do
- mount Sidekiq::Web => "/api/sidekiq"
- mount Zhong::Web, at: "/api/zhong"
+ mount GoodJob::Engine => "/api/good_job"
end
+ get "up" => "health#show"
+ get "api/up" => "health#show"
+
get "auth/:provider/callback", to: "oauth#authorize"
namespace :api do
diff --git a/api/config/sidekiq.yml b/api/config/sidekiq.yml
deleted file mode 100644
index e156d892ed..0000000000
--- a/api/config/sidekiq.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-:queues:
- - default
- - mailers
- - deletions
- - low_priority
- - ahoy
- - annotations
diff --git a/api/config/solid_cache.yml b/api/config/solid_cache.yml
new file mode 100644
index 0000000000..1ef713acde
--- /dev/null
+++ b/api/config/solid_cache.yml
@@ -0,0 +1,21 @@
+default: &default
+ database: cache
+ store_options:
+ max_age: <%= 1.week.to_i %>
+ max_size: <%= 256.megabytes %>
+ namespace: <%= ENV.fetch("RAILS_CACHE_NAMESPACE", "manifold") %>
+
+production:
+ <<: *default
+
+demo:
+ <<: *default
+
+staging:
+ <<: *default
+
+development:
+ <<: *default
+
+test:
+ <<: *default
diff --git a/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb
new file mode 100644
index 0000000000..d52baee79a
--- /dev/null
+++ b/api/db/cache_migrate/20260209194905_create_solid_cache_entries.solid_cache.rb
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+# This migration comes from solid_cache (originally 20230724121448)
+class CreateSolidCacheEntries < ActiveRecord::Migration[7.0]
+ def change
+ create_table :solid_cache_entries do |t|
+ t.binary :key, null: false, limit: 1024
+ t.binary :value, null: false, limit: 512.megabytes
+ t.datetime :created_at, null: false
+
+ t.index :key, unique: true
+ end
+ end
+end
diff --git a/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb
new file mode 100644
index 0000000000..2dd1a5de12
--- /dev/null
+++ b/api/db/cache_migrate/20260209194906_add_key_hash_and_byte_size_to_solid_cache_entries.solid_cache.rb
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+# This migration comes from solid_cache (originally 20240108155507)
+class AddKeyHashAndByteSizeToSolidCacheEntries < ActiveRecord::Migration[7.0]
+ def change
+ change_table :solid_cache_entries do |t| # rubocop:disable Rails/BulkChangeTable
+ t.column :key_hash, :integer, null: true, limit: 8
+ t.column :byte_size, :integer, null: true, limit: 4
+ end
+ end
+end
diff --git a/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb
new file mode 100644
index 0000000000..a20239eab1
--- /dev/null
+++ b/api/db/cache_migrate/20260209194907_add_key_hash_and_byte_size_indexes_and_null_constraints_to_solid_cache_entries.solid_cache.rb
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+# This migration comes from solid_cache (originally 20240110111600)
+class AddKeyHashAndByteSizeIndexesAndNullConstraintsToSolidCacheEntries < ActiveRecord::Migration[7.0]
+ def change
+ change_table :solid_cache_entries, bulk: true do |t|
+ t.change_null :key_hash, false
+ t.change_null :byte_size, false
+ t.index :key_hash, unique: true
+ t.index [:key_hash, :byte_size]
+ t.index :byte_size
+ end
+ end
+end
diff --git a/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb b/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb
new file mode 100644
index 0000000000..2f7e1f2486
--- /dev/null
+++ b/api/db/cache_migrate/20260209194908_remove_key_index_from_solid_cache_entries.solid_cache.rb
@@ -0,0 +1,10 @@
+# frozen_string_literal: true
+
+# This migration comes from solid_cache (originally 20240110111702)
+class RemoveKeyIndexFromSolidCacheEntries < ActiveRecord::Migration[7.0]
+ def change
+ change_table :solid_cache_entries do |t|
+ t.remove_index :key, unique: true
+ end
+ end
+end
diff --git a/api/db/cache_structure.sql b/api/db/cache_structure.sql
new file mode 100644
index 0000000000..4d0d2edded
--- /dev/null
+++ b/api/db/cache_structure.sql
@@ -0,0 +1,141 @@
+\restrict acy6eTEdEyCwlH42B66Wg4BSGl2eqioDwK0CMSORx0X8L8IA6xc2r6aweKCXVYY
+
+-- Dumped from database version 13.22
+-- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1)
+
+SET statement_timeout = 0;
+SET lock_timeout = 0;
+SET idle_in_transaction_session_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SELECT pg_catalog.set_config('search_path', '', false);
+SET check_function_bodies = false;
+SET xmloption = content;
+SET client_min_messages = warning;
+SET row_security = off;
+
+SET default_tablespace = '';
+
+SET default_table_access_method = heap;
+
+--
+-- Name: ar_internal_metadata; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.ar_internal_metadata (
+ key character varying NOT NULL,
+ value character varying,
+ created_at timestamp(6) without time zone NOT NULL,
+ updated_at timestamp(6) without time zone NOT NULL
+);
+
+
+--
+-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.schema_migrations (
+ version character varying NOT NULL
+);
+
+
+--
+-- Name: solid_cache_entries; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.solid_cache_entries (
+ id bigint NOT NULL,
+ key bytea NOT NULL,
+ value bytea NOT NULL,
+ created_at timestamp(6) without time zone NOT NULL,
+ key_hash bigint NOT NULL,
+ byte_size integer NOT NULL
+);
+
+
+--
+-- Name: solid_cache_entries_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.solid_cache_entries_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: solid_cache_entries_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.solid_cache_entries_id_seq OWNED BY public.solid_cache_entries.id;
+
+
+--
+-- Name: solid_cache_entries id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.solid_cache_entries ALTER COLUMN id SET DEFAULT nextval('public.solid_cache_entries_id_seq'::regclass);
+
+
+--
+-- Name: ar_internal_metadata ar_internal_metadata_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.ar_internal_metadata
+ ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key);
+
+
+--
+-- Name: schema_migrations schema_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.schema_migrations
+ ADD CONSTRAINT schema_migrations_pkey PRIMARY KEY (version);
+
+
+--
+-- Name: solid_cache_entries solid_cache_entries_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.solid_cache_entries
+ ADD CONSTRAINT solid_cache_entries_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: index_solid_cache_entries_on_byte_size; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_solid_cache_entries_on_byte_size ON public.solid_cache_entries USING btree (byte_size);
+
+
+--
+-- Name: index_solid_cache_entries_on_key_hash; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_solid_cache_entries_on_key_hash ON public.solid_cache_entries USING btree (key_hash);
+
+
+--
+-- Name: index_solid_cache_entries_on_key_hash_and_byte_size; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_solid_cache_entries_on_key_hash_and_byte_size ON public.solid_cache_entries USING btree (key_hash, byte_size);
+
+
+--
+-- PostgreSQL database dump complete
+--
+
+\unrestrict acy6eTEdEyCwlH42B66Wg4BSGl2eqioDwK0CMSORx0X8L8IA6xc2r6aweKCXVYY
+
+SET search_path TO "$user", public;
+
+INSERT INTO "schema_migrations" (version) VALUES
+('20260209194905'),
+('20260209194906'),
+('20260209194907'),
+('20260209194908');
+
+
diff --git a/api/db/migrate/20250527180248_add_search_indexed_to_text_section_nodes.rb b/api/db/migrate/20250527180248_add_search_indexed_to_text_section_nodes.rb
index b1f1c64743..8c5a61c824 100644
--- a/api/db/migrate/20250527180248_add_search_indexed_to_text_section_nodes.rb
+++ b/api/db/migrate/20250527180248_add_search_indexed_to_text_section_nodes.rb
@@ -13,10 +13,13 @@ def change
reversible do |dir|
dir.up do
if !Rails.env.test? && defined?(::TextSectionNodes::BackportSearchIndexJob)
- begin
- ::TextSectionNodes::BackportSearchIndexJob.set(wait: 10.minutes).perform_later
- rescue StandardError
- # Intentionally left blank
+ # If this is our first migration, good_jobs won't exist yet, but the db is empty anyway
+ if connection.table_exists?("good_jobs")
+ begin
+ ::TextSectionNodes::BackportSearchIndexJob.set(wait: 10.minutes).perform_later
+ rescue StandardError
+ # Intentionally left blank
+ end
end
end
end
diff --git a/api/db/migrate/20250603170620_create_good_jobs.rb b/api/db/migrate/20250603170620_create_good_jobs.rb
new file mode 100644
index 0000000000..6cfd24455c
--- /dev/null
+++ b/api/db/migrate/20250603170620_create_good_jobs.rb
@@ -0,0 +1,102 @@
+# frozen_string_literal: true
+
+class CreateGoodJobs < ActiveRecord::Migration[7.0]
+ def change
+ # Uncomment for Postgres v12 or earlier to enable gen_random_uuid() support
+ # enable_extension 'pgcrypto'
+
+ create_table :good_jobs, id: :uuid do |t|
+ t.text :queue_name
+ t.integer :priority
+ t.jsonb :serialized_params
+ t.datetime :scheduled_at
+ t.datetime :performed_at
+ t.datetime :finished_at
+ t.text :error
+
+ t.timestamps
+
+ t.uuid :active_job_id
+ t.text :concurrency_key
+ t.text :cron_key
+ t.uuid :retried_good_job_id
+ t.datetime :cron_at
+
+ t.uuid :batch_id
+ t.uuid :batch_callback_id
+
+ t.boolean :is_discrete
+ t.integer :executions_count
+ t.text :job_class
+ t.integer :error_event, limit: 2
+ t.text :labels, array: true
+ t.uuid :locked_by_id
+ t.datetime :locked_at
+ end
+
+ create_table :good_job_batches, id: :uuid do |t|
+ t.timestamps
+ t.text :description
+ t.jsonb :serialized_properties
+ t.text :on_finish
+ t.text :on_success
+ t.text :on_discard
+ t.text :callback_queue_name
+ t.integer :callback_priority
+ t.datetime :enqueued_at
+ t.datetime :discarded_at
+ t.datetime :finished_at
+ end
+
+ create_table :good_job_executions, id: :uuid do |t|
+ t.timestamps
+
+ t.uuid :active_job_id, null: false
+ t.text :job_class
+ t.text :queue_name
+ t.jsonb :serialized_params
+ t.datetime :scheduled_at
+ t.datetime :finished_at
+ t.text :error
+ t.integer :error_event, limit: 2
+ t.text :error_backtrace, array: true
+ t.uuid :process_id
+ t.interval :duration
+ end
+
+ create_table :good_job_processes, id: :uuid do |t|
+ t.timestamps
+ t.jsonb :state
+ t.integer :lock_type, limit: 2
+ end
+
+ create_table :good_job_settings, id: :uuid do |t|
+ t.timestamps
+ t.text :key
+ t.jsonb :value
+ t.index :key, unique: true
+ end
+
+ add_index :good_jobs, :scheduled_at, where: "(finished_at IS NULL)", name: :index_good_jobs_on_scheduled_at
+ add_index :good_jobs, [:queue_name, :scheduled_at], where: "(finished_at IS NULL)", name: :index_good_jobs_on_queue_name_and_scheduled_at
+ add_index :good_jobs, [:active_job_id, :created_at], name: :index_good_jobs_on_active_job_id_and_created_at
+ add_index :good_jobs, :concurrency_key, where: "(finished_at IS NULL)", name: :index_good_jobs_on_concurrency_key_when_unfinished
+ add_index :good_jobs, [:cron_key, :created_at], where: "(cron_key IS NOT NULL)", name: :index_good_jobs_on_cron_key_and_created_at_cond
+ add_index :good_jobs, [:cron_key, :cron_at], where: "(cron_key IS NOT NULL)", unique: true, name: :index_good_jobs_on_cron_key_and_cron_at_cond
+ add_index :good_jobs, [:finished_at], where: "retried_good_job_id IS NULL AND finished_at IS NOT NULL", name: :index_good_jobs_jobs_on_finished_at
+ add_index :good_jobs, [:priority, :created_at], order: { priority: "DESC NULLS LAST", created_at: :asc },
+ where: "finished_at IS NULL", name: :index_good_jobs_jobs_on_priority_created_at_when_unfinished
+ add_index :good_jobs, [:priority, :created_at], order: { priority: "ASC NULLS LAST", created_at: :asc },
+ where: "finished_at IS NULL", name: :index_good_job_jobs_for_candidate_lookup
+ add_index :good_jobs, [:batch_id], where: "batch_id IS NOT NULL"
+ add_index :good_jobs, [:batch_callback_id], where: "batch_callback_id IS NOT NULL"
+ add_index :good_jobs, :labels, using: :gin, where: "(labels IS NOT NULL)", name: :index_good_jobs_on_labels
+
+ add_index :good_job_executions, [:active_job_id, :created_at], name: :index_good_job_executions_on_active_job_id_and_created_at
+ add_index :good_jobs, [:priority, :scheduled_at], order: { priority: "ASC NULLS LAST", scheduled_at: :asc },
+ where: "finished_at IS NULL AND locked_by_id IS NULL", name: :index_good_jobs_on_priority_scheduled_at_unfinished_unlocked
+ add_index :good_jobs, :locked_by_id,
+ where: "locked_by_id IS NOT NULL", name: "index_good_jobs_on_locked_by_id"
+ add_index :good_job_executions, [:process_id, :created_at], name: :index_good_job_executions_on_process_id_and_created_at
+ end
+end
diff --git a/api/db/structure.sql b/api/db/structure.sql
index 090f84fbb1..32e1d5616e 100644
--- a/api/db/structure.sql
+++ b/api/db/structure.sql
@@ -1,3 +1,8 @@
+\restrict LNheIDBOnK0QpSehtUWNxnxD3q94AfOg7XcUemfZCwkIiA26sNgFEmPkBcpqG1A
+
+-- Dumped from database version 13.22
+-- Dumped by pg_dump version 13.22 (Debian 13.22-1.pgdg11+1)
+
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
@@ -468,7 +473,8 @@ CREATE TABLE public.annotations (
marked_for_purge_at timestamp without time zone,
resolved_flags_count bigint DEFAULT 0 NOT NULL,
unresolved_flags_count bigint DEFAULT 0 NOT NULL,
- flagger_ids uuid[] DEFAULT '{}'::uuid[] NOT NULL
+ flagger_ids uuid[] DEFAULT '{}'::uuid[] NOT NULL,
+ reader_display_format text
);
@@ -860,7 +866,9 @@ CREATE TABLE public.projects (
marked_for_purge_at timestamp without time zone,
social_image_data jsonb,
social_description text,
- social_title text
+ social_title text,
+ orphaned_journal_issue_id uuid,
+ orphaned_journal_issue boolean DEFAULT false NOT NULL
);
@@ -1092,8 +1100,8 @@ CREATE TABLE public.journal_issues (
journal_id uuid NOT NULL,
journal_volume_id uuid,
creator_id uuid,
- fa_cache jsonb DEFAULT '{}'::jsonb NOT NULL,
number character varying DEFAULT ''::character varying NOT NULL,
+ fa_cache jsonb DEFAULT '{}'::jsonb NOT NULL,
sort_title integer DEFAULT 0 NOT NULL,
pending_sort_title integer
);
@@ -1511,6 +1519,20 @@ CREATE TABLE public.export_targets (
);
+--
+-- Name: external_identifiers; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.external_identifiers (
+ id uuid DEFAULT gen_random_uuid() NOT NULL,
+ identifier character varying NOT NULL,
+ identifiable_type character varying,
+ identifiable_id uuid,
+ created_at timestamp(6) without time zone NOT NULL,
+ updated_at timestamp(6) without time zone NOT NULL
+);
+
+
--
-- Name: user_collected_composite_entries; Type: TABLE; Schema: public; Owner: -
--
@@ -1675,6 +1697,107 @@ CREATE SEQUENCE public.friendly_id_slugs_id_seq
ALTER SEQUENCE public.friendly_id_slugs_id_seq OWNED BY public.friendly_id_slugs.id;
+--
+-- Name: good_job_batches; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.good_job_batches (
+ id uuid DEFAULT gen_random_uuid() NOT NULL,
+ created_at timestamp(6) without time zone NOT NULL,
+ updated_at timestamp(6) without time zone NOT NULL,
+ description text,
+ serialized_properties jsonb,
+ on_finish text,
+ on_success text,
+ on_discard text,
+ callback_queue_name text,
+ callback_priority integer,
+ enqueued_at timestamp(6) without time zone,
+ discarded_at timestamp(6) without time zone,
+ finished_at timestamp(6) without time zone
+);
+
+
+--
+-- Name: good_job_executions; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.good_job_executions (
+ id uuid DEFAULT gen_random_uuid() NOT NULL,
+ created_at timestamp(6) without time zone NOT NULL,
+ updated_at timestamp(6) without time zone NOT NULL,
+ active_job_id uuid NOT NULL,
+ job_class text,
+ queue_name text,
+ serialized_params jsonb,
+ scheduled_at timestamp(6) without time zone,
+ finished_at timestamp(6) without time zone,
+ error text,
+ error_event smallint,
+ error_backtrace text[],
+ process_id uuid,
+ duration interval
+);
+
+
+--
+-- Name: good_job_processes; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.good_job_processes (
+ id uuid DEFAULT gen_random_uuid() NOT NULL,
+ created_at timestamp(6) without time zone NOT NULL,
+ updated_at timestamp(6) without time zone NOT NULL,
+ state jsonb,
+ lock_type smallint
+);
+
+
+--
+-- Name: good_job_settings; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.good_job_settings (
+ id uuid DEFAULT gen_random_uuid() NOT NULL,
+ created_at timestamp(6) without time zone NOT NULL,
+ updated_at timestamp(6) without time zone NOT NULL,
+ key text,
+ value jsonb
+);
+
+
+--
+-- Name: good_jobs; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.good_jobs (
+ id uuid DEFAULT gen_random_uuid() NOT NULL,
+ queue_name text,
+ priority integer,
+ serialized_params jsonb,
+ scheduled_at timestamp(6) without time zone,
+ performed_at timestamp(6) without time zone,
+ finished_at timestamp(6) without time zone,
+ error text,
+ created_at timestamp(6) without time zone NOT NULL,
+ updated_at timestamp(6) without time zone NOT NULL,
+ active_job_id uuid,
+ concurrency_key text,
+ cron_key text,
+ retried_good_job_id uuid,
+ cron_at timestamp(6) without time zone,
+ batch_id uuid,
+ batch_callback_id uuid,
+ is_discrete boolean,
+ executions_count integer,
+ job_class text,
+ error_event smallint,
+ labels text[],
+ locked_by_id uuid,
+ locked_at timestamp(6) without time zone
+);
+
+
--
-- Name: identities; Type: TABLE; Schema: public; Owner: -
--
@@ -2039,7 +2162,9 @@ CREATE TABLE public.pg_search_documents (
metadata jsonb,
created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
- tsv_composite tsvector GENERATED ALWAYS AS ((((((((public.to_unaccented_weighted_tsv(title, 'A'::"char") || public.to_unaccented_weighted_tsv(primary_data, 'A'::"char")) || public.to_unaccented_weighted_tsv(secondary, 'B'::"char")) || public.to_unaccented_weighted_tsv(secondary_data, 'B'::"char")) || public.to_unaccented_weighted_tsv(tertiary, 'C'::"char")) || public.to_unaccented_weighted_tsv(tertiary_data, 'C'::"char")) || public.to_unaccented_weighted_tsv(content, 'D'::"char")) || public.to_unaccented_weighted_tsv(metadata, 'D'::"char"))) STORED NOT NULL
+ tsv_composite tsvector GENERATED ALWAYS AS ((((((((public.to_unaccented_weighted_tsv(title, 'A'::"char") || public.to_unaccented_weighted_tsv(primary_data, 'A'::"char")) || public.to_unaccented_weighted_tsv(secondary, 'B'::"char")) || public.to_unaccented_weighted_tsv(secondary_data, 'B'::"char")) || public.to_unaccented_weighted_tsv(tertiary, 'C'::"char")) || public.to_unaccented_weighted_tsv(tertiary_data, 'C'::"char")) || public.to_unaccented_weighted_tsv(content, 'D'::"char")) || public.to_unaccented_weighted_tsv(metadata, 'D'::"char"))) STORED NOT NULL,
+ journal_issue_id uuid,
+ journal_content boolean DEFAULT false NOT NULL
);
@@ -3339,6 +3464,47 @@ CREATE VIEW public.user_derived_roles AS
GROUP BY u.id;
+--
+-- Name: user_group_entitleables; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.user_group_entitleables (
+ id uuid DEFAULT gen_random_uuid() NOT NULL,
+ user_group_id uuid NOT NULL,
+ entitleable_type character varying NOT NULL,
+ entitleable_id uuid NOT NULL,
+ created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
+ updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL
+);
+
+
+--
+-- Name: user_group_memberships; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.user_group_memberships (
+ id uuid DEFAULT gen_random_uuid() NOT NULL,
+ user_id uuid NOT NULL,
+ user_group_id uuid NOT NULL,
+ source_type character varying,
+ source_id uuid,
+ created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
+ updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL
+);
+
+
+--
+-- Name: user_groups; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.user_groups (
+ id uuid DEFAULT gen_random_uuid() NOT NULL,
+ name text NOT NULL,
+ created_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
+ updated_at timestamp(6) without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL
+);
+
+
--
-- Name: version_associations; Type: TABLE; Schema: public; Owner: -
--
@@ -3682,6 +3848,14 @@ ALTER TABLE ONLY public.export_targets
ADD CONSTRAINT export_targets_pkey PRIMARY KEY (id);
+--
+-- Name: external_identifiers external_identifiers_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.external_identifiers
+ ADD CONSTRAINT external_identifiers_pkey PRIMARY KEY (id);
+
+
--
-- Name: features features_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
@@ -3706,6 +3880,46 @@ ALTER TABLE ONLY public.friendly_id_slugs
ADD CONSTRAINT friendly_id_slugs_pkey PRIMARY KEY (id);
+--
+-- Name: good_job_batches good_job_batches_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.good_job_batches
+ ADD CONSTRAINT good_job_batches_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: good_job_executions good_job_executions_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.good_job_executions
+ ADD CONSTRAINT good_job_executions_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: good_job_processes good_job_processes_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.good_job_processes
+ ADD CONSTRAINT good_job_processes_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: good_job_settings good_job_settings_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.good_job_settings
+ ADD CONSTRAINT good_job_settings_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: good_jobs good_jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.good_jobs
+ ADD CONSTRAINT good_jobs_pkey PRIMARY KEY (id);
+
+
--
-- Name: identities identities_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
@@ -4250,6 +4464,30 @@ ALTER TABLE ONLY public.user_collected_texts
ADD CONSTRAINT user_collected_texts_pkey PRIMARY KEY (id);
+--
+-- Name: user_group_entitleables user_group_entitleables_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.user_group_entitleables
+ ADD CONSTRAINT user_group_entitleables_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: user_group_memberships user_group_memberships_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.user_group_memberships
+ ADD CONSTRAINT user_group_memberships_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: user_groups user_groups_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.user_groups
+ ADD CONSTRAINT user_groups_pkey PRIMARY KEY (id);
+
+
--
-- Name: users users_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
@@ -4876,6 +5114,20 @@ CREATE UNIQUE INDEX index_export_targets_on_slug ON public.export_targets USING
CREATE INDEX index_export_targets_on_strategy ON public.export_targets USING btree (strategy);
+--
+-- Name: index_external_identifiers_on_identifiable; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_external_identifiers_on_identifiable ON public.external_identifiers USING btree (identifiable_type, identifiable_id);
+
+
+--
+-- Name: index_external_identifiers_on_identifier; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_external_identifiers_on_identifier ON public.external_identifiers USING btree (identifier);
+
+
--
-- Name: index_flags_on_flaggable_type_and_flaggable_id; Type: INDEX; Schema: public; Owner: -
--
@@ -4925,6 +5177,125 @@ CREATE INDEX index_friendly_id_slugs_on_sluggable_id ON public.friendly_id_slugs
CREATE INDEX index_friendly_id_slugs_on_sluggable_type ON public.friendly_id_slugs USING btree (sluggable_type);
+--
+-- Name: index_good_job_executions_on_active_job_id_and_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_job_executions_on_active_job_id_and_created_at ON public.good_job_executions USING btree (active_job_id, created_at);
+
+
+--
+-- Name: index_good_job_executions_on_process_id_and_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_job_executions_on_process_id_and_created_at ON public.good_job_executions USING btree (process_id, created_at);
+
+
+--
+-- Name: index_good_job_jobs_for_candidate_lookup; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_job_jobs_for_candidate_lookup ON public.good_jobs USING btree (priority, created_at) WHERE (finished_at IS NULL);
+
+
+--
+-- Name: index_good_job_settings_on_key; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_good_job_settings_on_key ON public.good_job_settings USING btree (key);
+
+
+--
+-- Name: index_good_jobs_jobs_on_finished_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_jobs_on_finished_at ON public.good_jobs USING btree (finished_at) WHERE ((retried_good_job_id IS NULL) AND (finished_at IS NOT NULL));
+
+
+--
+-- Name: index_good_jobs_jobs_on_priority_created_at_when_unfinished; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_jobs_on_priority_created_at_when_unfinished ON public.good_jobs USING btree (priority DESC NULLS LAST, created_at) WHERE (finished_at IS NULL);
+
+
+--
+-- Name: index_good_jobs_on_active_job_id_and_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_active_job_id_and_created_at ON public.good_jobs USING btree (active_job_id, created_at);
+
+
+--
+-- Name: index_good_jobs_on_batch_callback_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_batch_callback_id ON public.good_jobs USING btree (batch_callback_id) WHERE (batch_callback_id IS NOT NULL);
+
+
+--
+-- Name: index_good_jobs_on_batch_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_batch_id ON public.good_jobs USING btree (batch_id) WHERE (batch_id IS NOT NULL);
+
+
+--
+-- Name: index_good_jobs_on_concurrency_key_when_unfinished; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_concurrency_key_when_unfinished ON public.good_jobs USING btree (concurrency_key) WHERE (finished_at IS NULL);
+
+
+--
+-- Name: index_good_jobs_on_cron_key_and_created_at_cond; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_cron_key_and_created_at_cond ON public.good_jobs USING btree (cron_key, created_at) WHERE (cron_key IS NOT NULL);
+
+
+--
+-- Name: index_good_jobs_on_cron_key_and_cron_at_cond; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_good_jobs_on_cron_key_and_cron_at_cond ON public.good_jobs USING btree (cron_key, cron_at) WHERE (cron_key IS NOT NULL);
+
+
+--
+-- Name: index_good_jobs_on_labels; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_labels ON public.good_jobs USING gin (labels) WHERE (labels IS NOT NULL);
+
+
+--
+-- Name: index_good_jobs_on_locked_by_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_locked_by_id ON public.good_jobs USING btree (locked_by_id) WHERE (locked_by_id IS NOT NULL);
+
+
+--
+-- Name: index_good_jobs_on_priority_scheduled_at_unfinished_unlocked; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_priority_scheduled_at_unfinished_unlocked ON public.good_jobs USING btree (priority, scheduled_at) WHERE ((finished_at IS NULL) AND (locked_by_id IS NULL));
+
+
+--
+-- Name: index_good_jobs_on_queue_name_and_scheduled_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_queue_name_and_scheduled_at ON public.good_jobs USING btree (queue_name, scheduled_at) WHERE (finished_at IS NULL);
+
+
+--
+-- Name: index_good_jobs_on_scheduled_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_good_jobs_on_scheduled_at ON public.good_jobs USING btree (scheduled_at) WHERE (finished_at IS NULL);
+
+
--
-- Name: index_identities_on_uid_and_provider; Type: INDEX; Schema: public; Owner: -
--
@@ -5226,6 +5597,13 @@ CREATE INDEX index_pending_entitlements_on_user_id ON public.pending_entitlement
CREATE INDEX index_pg_search_documents_on_journal_id ON public.pg_search_documents USING btree (journal_id);
+--
+-- Name: index_pg_search_documents_on_journal_issue_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_pg_search_documents_on_journal_issue_id ON public.pg_search_documents USING btree (journal_issue_id);
+
+
--
-- Name: index_pg_search_documents_on_project_id; Type: INDEX; Schema: public; Owner: -
--
@@ -6507,6 +6885,62 @@ CREATE INDEX index_user_collected_text_sections_on_user_id ON public.user_collec
CREATE INDEX index_user_collected_texts_on_user_id ON public.user_collected_texts USING btree (user_id);
+--
+-- Name: index_user_group_entitleables_on_entitleable; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_user_group_entitleables_on_entitleable ON public.user_group_entitleables USING btree (entitleable_type, entitleable_id);
+
+
+--
+-- Name: index_user_group_entitleables_on_user_group_and_entitleable; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_user_group_entitleables_on_user_group_and_entitleable ON public.user_group_entitleables USING btree (user_group_id, entitleable_type, entitleable_id);
+
+
+--
+-- Name: index_user_group_entitleables_on_user_group_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_user_group_entitleables_on_user_group_id ON public.user_group_entitleables USING btree (user_group_id);
+
+
+--
+-- Name: index_user_group_memberships_on_source; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_user_group_memberships_on_source ON public.user_group_memberships USING btree (source_type, source_id);
+
+
+--
+-- Name: index_user_group_memberships_on_user_group_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_user_group_memberships_on_user_group_id ON public.user_group_memberships USING btree (user_group_id);
+
+
+--
+-- Name: index_user_group_memberships_on_user_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_user_group_memberships_on_user_id ON public.user_group_memberships USING btree (user_id);
+
+
+--
+-- Name: index_user_group_memberships_on_user_id_and_user_group_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_user_group_memberships_on_user_id_and_user_group_id ON public.user_group_memberships USING btree (user_id, user_group_id);
+
+
+--
+-- Name: index_user_groups_on_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_user_groups_on_name ON public.user_groups USING btree (name);
+
+
--
-- Name: index_users_on_deleted_at; Type: INDEX; Schema: public; Owner: -
--
@@ -6746,6 +7180,22 @@ ALTER TABLE ONLY public.user_collected_texts
ADD CONSTRAINT fk_rails_127b46870c FOREIGN KEY (text_id) REFERENCES public.texts(id) ON DELETE CASCADE;
+--
+-- Name: journal_issues fk_rails_159f2e66d4; Type: FK CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.journal_issues
+ ADD CONSTRAINT fk_rails_159f2e66d4 FOREIGN KEY (journal_id) REFERENCES public.journals(id) ON DELETE RESTRICT;
+
+
+--
+-- Name: journal_issues fk_rails_15a20a3530; Type: FK CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.journal_issues
+ ADD CONSTRAINT fk_rails_15a20a3530 FOREIGN KEY (journal_volume_id) REFERENCES public.journal_volumes(id) ON DELETE RESTRICT;
+
+
--
-- Name: entitlement_import_transitions fk_rails_19acd61494; Type: FK CONSTRAINT; Schema: public; Owner: -
--
@@ -6802,6 +7252,14 @@ ALTER TABLE ONLY public.pending_entitlement_transitions
ADD CONSTRAINT fk_rails_292c17a15e FOREIGN KEY (pending_entitlement_id) REFERENCES public.pending_entitlements(id) ON DELETE CASCADE;
+--
+-- Name: projects fk_rails_2a006842be; Type: FK CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.projects
+ ADD CONSTRAINT fk_rails_2a006842be FOREIGN KEY (journal_issue_id) REFERENCES public.journal_issues(id) ON DELETE RESTRICT;
+
+
--
-- Name: reading_group_composite_entries fk_rails_313af69a44; Type: FK CONSTRAINT; Schema: public; Owner: -
--
@@ -7210,6 +7668,14 @@ ALTER TABLE ONLY public.reading_group_projects
ADD CONSTRAINT fk_rails_af4c0905cb FOREIGN KEY (reading_group_id) REFERENCES public.reading_groups(id) ON DELETE CASCADE;
+--
+-- Name: pg_search_documents fk_rails_b02f365b4d; Type: FK CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.pg_search_documents
+ ADD CONSTRAINT fk_rails_b02f365b4d FOREIGN KEY (journal_issue_id) REFERENCES public.journal_issues(id) ON DELETE SET NULL;
+
+
--
-- Name: import_selection_matches fk_rails_b3b5d1b78b; Type: FK CONSTRAINT; Schema: public; Owner: -
--
@@ -7370,6 +7836,14 @@ ALTER TABLE ONLY public.user_collected_composite_entries
ADD CONSTRAINT fk_rails_e03a5be0da FOREIGN KEY (project_id) REFERENCES public.projects(id) ON DELETE CASCADE;
+--
+-- Name: journal_volumes fk_rails_e11de3191d; Type: FK CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.journal_volumes
+ ADD CONSTRAINT fk_rails_e11de3191d FOREIGN KEY (journal_id) REFERENCES public.journals(id) ON DELETE RESTRICT;
+
+
--
-- Name: user_collected_text_sections fk_rails_e3bf44e760; Type: FK CONSTRAINT; Schema: public; Owner: -
--
@@ -7478,6 +7952,8 @@ ALTER TABLE ONLY public.reading_group_composite_entries
-- PostgreSQL database dump complete
--
+\unrestrict LNheIDBOnK0QpSehtUWNxnxD3q94AfOg7XcUemfZCwkIiA26sNgFEmPkBcpqG1A
+
SET search_path TO "$user", public;
INSERT INTO "schema_migrations" (version) VALUES
@@ -7831,17 +8307,25 @@ INSERT INTO "schema_migrations" (version) VALUES
('20250527180248'),
('20250528002025'),
('20250530205742'),
+('20250603170620'),
('20250603192547'),
('20250609191642'),
('20250609192241'),
+('20250723210143'),
('20251016204352'),
('20251017174417'),
('20251017211501'),
('20251020225421'),
+('20251022183946'),
('20251103175506'),
('20251103175949'),
('20251103180007'),
('20251105165521'),
-('20251121202033');
+('20251117204731'),
+('20251120233556'),
+('20251121202033'),
+('20251203230443'),
+('20251203231940'),
+('20260209183815');
diff --git a/api/lib/auth_token.rb b/api/lib/auth_token.rb
index 87fa309fd1..0bf93ad1c0 100644
--- a/api/lib/auth_token.rb
+++ b/api/lib/auth_token.rb
@@ -10,7 +10,7 @@ class << self
def encode(payload, ttl_in_minutes = DEFAULT_TTL)
payload[:exp] = ttl_in_minutes.minutes.from_now.to_i
- JWT.encode(payload, Rails.application.secrets.secret_key_base)
+ JWT.encode(payload, Rails.application.secret_key_base)
end
# @param [User] user
@@ -26,7 +26,7 @@ def encode_user(user)
# Decode a token and return the payload inside
# If will throw an error if expired or invalid. See the docs for the JWT gem.
def decode(token, leeway = nil)
- payload, = JWT.decode(token, Rails.application.secrets.secret_key_base, leeway: leeway)
+ payload, = JWT.decode(token, Rails.application.secret_key_base, leeway: leeway)
payload.with_indifferent_access
end
diff --git a/api/lib/manifold_env.rb b/api/lib/manifold_env.rb
index 535f525890..e3093b7d82 100644
--- a/api/lib/manifold_env.rb
+++ b/api/lib/manifold_env.rb
@@ -13,7 +13,6 @@ module ManifoldEnv
autoload :OauthConfig
autoload :OauthProvider
autoload :RateLimiting
- autoload :RedisConfig
autoload :Types
end
@@ -24,10 +23,6 @@ module ManifoldEnv
mattr_accessor :rate_limiting do
ManifoldEnv::RateLimiting.new
end
-
- mattr_accessor :redis do
- ManifoldEnv::RedisConfig.new
- end
end
ManifoldEnv.eager_load!
diff --git a/api/lib/manifold_env/oauth_provider.rb b/api/lib/manifold_env/oauth_provider.rb
index 77a8447468..dd96e40408 100644
--- a/api/lib/manifold_env/oauth_provider.rb
+++ b/api/lib/manifold_env/oauth_provider.rb
@@ -6,21 +6,20 @@ class OauthProvider
include Equalizer.new(:name)
include ManifoldEnv::HasConfigurationDSL
include ActiveModel::Validations
- include Redis::Objects
-
- value :app_id
- value :secret
CREDENTIAL_KEYS = %i(id secret).freeze
validates :credentials, presence: { message: "are unset" }
attr_reader :name
+ attr_accessor :app_id, :secret
alias id name
def initialize(name)
@name = name
+ @app_id = nil
+ @secret = nil
end
def <=>(other)
@@ -40,7 +39,7 @@ def =~(other)
def credentials
return nil unless has_credentials?
- custom? ? custom.credentials : [app_id.value, secret.value]
+ custom? ? custom.credentials : [app_id, secret]
end
# @!attribute [r] custom
@@ -71,7 +70,7 @@ def has_app_id?
if custom?
custom.client_id.present?
else
- app_id.value.present?
+ app_id.present?
end
end
@@ -83,7 +82,7 @@ def has_secret?
if custom?
custom.client_secret.present?
else
- secret.value.present?
+ secret.present?
end
end
diff --git a/api/lib/manifold_env/rate_limiting.rb b/api/lib/manifold_env/rate_limiting.rb
index 543b735c1b..fbe45cd9c0 100644
--- a/api/lib/manifold_env/rate_limiting.rb
+++ b/api/lib/manifold_env/rate_limiting.rb
@@ -6,13 +6,14 @@
module ManifoldEnv
class RateLimiting
include DefinesRateLimits
- include Redis::Objects
DNS_SERVERS = %w[
8.8.8.8
8.8.4.4
].freeze
+ PUBLIC_IPS_CACHE_KEY = "rate_limiting:public_ips"
+
map_throttle! :comment_creation, limit: 10, period: 3600
map_throttle! :public_annotation_creation, limit: 5, period: 300
@@ -21,10 +22,6 @@ class RateLimiting
map_throttle! :registration, limit: 5, period: 86_400
- # We store the public IP(s) for the Manifold application
- # so that the client does not accidentally get throttled.
- set :public_ips
-
def id
1
end
@@ -37,6 +34,11 @@ def each_throttled_category
end
end
+ # @return [Set]
+ def public_ips
+ Rails.cache.read(PUBLIC_IPS_CACHE_KEY) || Set.new
+ end
+
# @param [String] domain
# @return [void]
def derive_public_ips!(domain)
@@ -47,7 +49,7 @@ def derive_public_ips!(domain)
end
rescue Resolv::ResolvError
# :nocov:
- public_ips.clear
+ clear_public_ips!
# :nocov:
end
@@ -57,12 +59,17 @@ def derive_public_ips!(domain)
# @return [void]
def reset_public_ips!(new_ips)
if new_ips.present?
- self.public_ips = new_ips
+ Rails.cache.write(PUBLIC_IPS_CACHE_KEY, new_ips.to_set)
else
- public_ips.clear
+ clear_public_ips!
end
end
+ # @return [void]
+ def clear_public_ips!
+ Rails.cache.delete(PUBLIC_IPS_CACHE_KEY)
+ end
+
# @api private
class Throttler
include Dry::Core::Equalizer.new(:category)
diff --git a/api/lib/manifold_env/redis_config.rb b/api/lib/manifold_env/redis_config.rb
deleted file mode 100644
index a830f6d8a8..0000000000
--- a/api/lib/manifold_env/redis_config.rb
+++ /dev/null
@@ -1,56 +0,0 @@
-# frozen_string_literal: true
-
-module ManifoldEnv
- class RedisConfig
- attr_reader :url
-
- # @param [String] url
- # @param [String] namespace_prefix
- def initialize(url: default_url, namespace_prefix: nil)
- @url = url
- @namespace_prefix = namespace_prefix || default_namespace_prefix
- end
-
- def namespace(*parts)
- [@namespace_prefix, *parts].join(":")
- end
-
- def namespaced_url(*parts)
- "#{url}/#{namespace(*parts)}"
- end
-
- def cache_options
- {
- namespace: namespace("cache"),
- url: url
- }
- end
-
- def sidekiq_options
- {
- url: url,
- namespace: "#{namespace('sidekiq')}:"
- }
- end
-
- def build_connection_pool(*namespace_parts, size: 5, timeout: 5)
- ConnectionPool.new size: size, timeout: timeout do
- build_connection(*namespace_parts)
- end
- end
-
- def build_connection(*namespace_parts)
- Redis::Namespace.new(namespace(*namespace_parts), redis: Redis.new(url: url))
- end
-
- private
-
- def default_namespace_prefix
- ENV["RAILS_REDIS_NAMESPACE"] || "manifold"
- end
-
- def default_url
- ENV["BOXEN_REDIS_URL"] || ENV["RAILS_REDIS_URL"] || "redis://127.0.0.1:6379"
- end
- end
-end
diff --git a/api/lib/storage/factory.rb b/api/lib/storage/factory.rb
index c4ae8cf363..a78ef5b6ee 100644
--- a/api/lib/storage/factory.rb
+++ b/api/lib/storage/factory.rb
@@ -6,6 +6,8 @@
require "shrine/storage/google_cloud_storage"
require "shrine/storage/s3"
+require "tus/storage/s3"
+
require_relative "types"
require_relative "strategy"
require_relative "tus_gcs"
@@ -166,6 +168,12 @@ def tus_server_s3_storage
)
end
+ def url_options
+ {
+ host: asset_host
+ }.compact
+ end
+
private
def file_storage(path, prefix)
@@ -196,7 +204,7 @@ def store_s3_options
end
def s3_storage(bucket, prefix)
- Shrine::Storage::S3.new(bucket:, **store_s3_options)
+ Shrine::Storage::S3.new(**store_s3_options, **{ bucket:, prefix: }.compact, public: true)
end
def test_storage(path, prefix)
@@ -206,6 +214,14 @@ def test_storage(path, prefix)
def test?
Rails.env.test?
end
+
+ def asset_host
+ if primary_store.file?
+ Rails.configuration.manifold.api_url&.sub(%r{/\z}, "") || ""
+ else
+ UploadConfig.asset_host
+ end
+ end
end
end
end
diff --git a/api/lib/tasks/release.rake b/api/lib/tasks/release.rake
new file mode 100644
index 0000000000..83dcd5b9d3
--- /dev/null
+++ b/api/lib/tasks/release.rake
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+namespace :manifold do
+ desc "Performs release tasks - database migration, database reseed, and version upgrade tasks"
+ task release: :environment do
+ Rake::Task["db:migrate:primary"].invoke
+ Rake::Task["db:migrate:cache"].invoke
+ Rake::Task["db:seed"].invoke
+ Rake::Task["manifold:upgrade"].invoke
+ end
+end
diff --git a/api/spec/jobs/formatted_attributes/purge_legacy_caches_job_spec.rb b/api/spec/jobs/formatted_attributes/purge_legacy_caches_job_spec.rb
deleted file mode 100644
index 2fec5ef29c..0000000000
--- a/api/spec/jobs/formatted_attributes/purge_legacy_caches_job_spec.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-# frozen_string_literal: true
-
-require 'rails_helper'
-
-RSpec.describe FormattedAttributes::PurgeLegacyCachesJob, type: :job do
- it "runs without issue" do
- expect do
- described_class.perform_now
- end.to execute_safely
- end
-end
diff --git a/api/spec/rails_helper.rb b/api/spec/rails_helper.rb
index f78e9047ca..3a878affab 100644
--- a/api/spec/rails_helper.rb
+++ b/api/spec/rails_helper.rb
@@ -110,8 +110,6 @@
# If you are not using ActiveRecord, you can remove this line.
ActiveRecord::Migration.maintain_test_schema!
-ActiveJob::Uniqueness.test_mode!
-
TestProf::FactoryDefault.configure do |config|
config.preserve_attributes = true
config.preserve_traits = true
@@ -189,10 +187,7 @@
# Truncate all test database tables before running tests.
config.before(:suite) do
DatabaseCleaner[:active_record].strategy = :transaction
- DatabaseCleaner[:redis].strategy = :deletion
-
DatabaseCleaner[:active_record].clean_with(:truncation)
- DatabaseCleaner[:redis].clean_with(:deletion)
Scenic.database.views.select(&:materialized).each do |view|
Scenic.database.refresh_materialized_view view.name, concurrently: false, cascade: false
diff --git a/api/spec/requests/comments_spec.rb b/api/spec/requests/comments_spec.rb
index ca4a33d74a..846a00db6a 100644
--- a/api/spec/requests/comments_spec.rb
+++ b/api/spec/requests/comments_spec.rb
@@ -133,7 +133,7 @@
end.to change(Comment, :count).by(10)
.and change(ThrottledRequest, :count).by(1)
- expect(response).to have_http_status(:service_unavailable)
+ expect(response).to have_http_status(:too_many_requests)
end
context "when the comment is spammy" do
diff --git a/api/spec/requests/reading_groups_spec.rb b/api/spec/requests/reading_groups_spec.rb
index cff9292134..ec092d7074 100644
--- a/api/spec/requests/reading_groups_spec.rb
+++ b/api/spec/requests/reading_groups_spec.rb
@@ -149,7 +149,7 @@ def making_the_request
end.to change(ReadingGroup, :count).by(10)
.and change(ThrottledRequest, :count).by(1)
- expect(response).to have_http_status(:service_unavailable)
+ expect(response).to have_http_status(:too_many_requests)
end
context "when the user has an unconfirmed email" do
diff --git a/api/spec/requests/text_sections/relationships/annotations_spec.rb b/api/spec/requests/text_sections/relationships/annotations_spec.rb
index 3ebca8160f..1210d04b58 100644
--- a/api/spec/requests/text_sections/relationships/annotations_spec.rb
+++ b/api/spec/requests/text_sections/relationships/annotations_spec.rb
@@ -206,7 +206,7 @@ def make_the_request!
end.to change(Annotation, :count).by(5)
.and change(ThrottledRequest, :count).by(1)
- expect(response).to have_http_status(:service_unavailable)
+ expect(response).to have_http_status(:too_many_requests)
end
context "when the user has not confirmed their email" do
diff --git a/api/spec/requests/users_spec.rb b/api/spec/requests/users_spec.rb
index 72f73dc18c..e77252c0de 100644
--- a/api/spec/requests/users_spec.rb
+++ b/api/spec/requests/users_spec.rb
@@ -83,7 +83,7 @@ def make_request!(headers: anonymous_headers, params: valid_params)
end.to change(User, :count).by(5)
.and change(ThrottledRequest, :count).by(1)
- expect(response).to have_http_status(:service_unavailable)
+ expect(response).to have_http_status(:too_many_requests)
end
it "tells the welcome mailer that the user was created by the admin when meta[createdByAdmin] is true" do
@@ -121,7 +121,7 @@ def make_request!(headers: anonymous_headers, params: valid_params)
end.to keep_the_same(User, :count)
.and change(ThrottledRequest, :count).by(1)
- expect(response).to have_http_status(:service_unavailable)
+ expect(response).to have_http_status(:too_many_requests)
end
end
end
diff --git a/api/zhong.rb b/api/zhong.rb
deleted file mode 100644
index 56bcd94f36..0000000000
--- a/api/zhong.rb
+++ /dev/null
@@ -1,66 +0,0 @@
-# frozen_string_literal: true
-
-require "./config/boot"
-require "./config/environment"
-
-Zhong.redis = Redis.new(url: ENV["RAILS_REDIS_URL"])
-
-Zhong.schedule do
- category "caches" do
- every(10.minutes, "refresh_all_flag_status_data") do
- ::Flags::RefreshAllStatusDataJob.perform_later
- end
-
- every(15.minutes, "refresh_project_collections") do
- ::ProjectCollectionJobs::QueueCacheCollectionProjectsJob.perform_later
- end
- end
-
- category "entitlements" do
- every(15.minutes, "audit") do
- Entitlements::AuditJob.perform_later
- end
-
- every(1.hour, "check_expiration") do
- Entitlements::CheckExpirationJob.perform_later
- end
- end
-
- category "uploads" do
- every(1.day, "expire_shrine_cache", at: "22:00", tz: "America/Los_Angeles") do
- ExpireShrineCacheJob.perform_later
- end
-
- every(1.day, "expire_tus_uploads", at: "23:00", tz: "America/Los_Angeles") do
- ExpireTusUploadsJob.perform_later
- end
- end
-
- category "notification" do
- every(1.day, "enqueue_user_daily_digests", at: "06:00") do
- Notifications::EnqueueDigestsJob.perform_later "daily"
- end
-
- every(1.week, "enqueue_user_weekly_digests", at: "Sunday 06:00") do
- Notifications::EnqueueDigestsJob.perform_later "weekly"
- end
- end
-
- category "packaging" do
- every(5.minutes, "automate_text_exports") do
- Texts::AutomateExportsJob.perform_later
- end
-
- every(1.day, "prune_text_exports", at: "01:00") do
- TextExports::PruneJob.perform_later
- end
-
- every(1.day, "prune_project_exports", at: "01:05") do
- ProjectExports::PruneJob.perform_later
- end
-
- every(4.hours, "prune_bag_it_temporary_directory") do
- Packaging::BagItSpec::PruneTemporaryDirectoryJob.perform_later
- end
- end
-end
diff --git a/bin/bundle b/bin/bundle
new file mode 100755
index 0000000000..a71368e323
--- /dev/null
+++ b/bin/bundle
@@ -0,0 +1,114 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+#
+# This file was generated by Bundler.
+#
+# The application 'bundle' is installed as part of a gem, and
+# this file is here to facilitate running it.
+#
+
+require "rubygems"
+
+m = Module.new do
+ module_function
+
+ def invoked_as_script?
+ File.expand_path($0) == File.expand_path(__FILE__)
+ end
+
+ def env_var_version
+ ENV["BUNDLER_VERSION"]
+ end
+
+ def cli_arg_version
+ return unless invoked_as_script? # don't want to hijack other binstubs
+ return unless "update".start_with?(ARGV.first || " ") # must be running `bundle update`
+ bundler_version = nil
+ update_index = nil
+ ARGV.each_with_index do |a, i|
+ if update_index && update_index.succ == i && a =~ Gem::Version::ANCHORED_VERSION_PATTERN
+ bundler_version = a
+ end
+ next unless a =~ /\A--bundler(?:[= ](#{Gem::Version::VERSION_PATTERN}))?\z/
+ bundler_version = $1
+ update_index = i
+ end
+ bundler_version
+ end
+
+ def gemfile
+ gemfile = ENV["BUNDLE_GEMFILE"]
+ return gemfile if gemfile && !gemfile.empty?
+
+ File.expand_path("../../Gemfile", __FILE__)
+ end
+
+ def lockfile
+ lockfile =
+ case File.basename(gemfile)
+ when "gems.rb" then gemfile.sub(/\.rb$/, ".locked")
+ else "#{gemfile}.lock"
+ end
+ File.expand_path(lockfile)
+ end
+
+ def lockfile_version
+ return unless File.file?(lockfile)
+ lockfile_contents = File.read(lockfile)
+ return unless lockfile_contents =~ /\n\nBUNDLED WITH\n\s{2,}(#{Gem::Version::VERSION_PATTERN})\n/
+ Regexp.last_match(1)
+ end
+
+ def bundler_version
+ @bundler_version ||=
+ env_var_version || cli_arg_version ||
+ lockfile_version
+ end
+
+ def bundler_requirement
+ return "#{Gem::Requirement.default}.a" unless bundler_version
+
+ bundler_gem_version = Gem::Version.new(bundler_version)
+
+ requirement = bundler_gem_version.approximate_recommendation
+
+ return requirement unless Gem::Version.new(Gem::VERSION) < Gem::Version.new("2.7.0")
+
+ requirement += ".a" if bundler_gem_version.prerelease?
+
+ requirement
+ end
+
+ def load_bundler!
+ ENV["BUNDLE_GEMFILE"] ||= gemfile
+
+ activate_bundler
+ end
+
+ def activate_bundler
+ gem_error = activation_error_handling do
+ gem "bundler", bundler_requirement
+ end
+ return if gem_error.nil?
+ require_error = activation_error_handling do
+ require "bundler/version"
+ end
+ return if require_error.nil? && Gem::Requirement.new(bundler_requirement).satisfied_by?(Gem::Version.new(Bundler::VERSION))
+ warn "Activating bundler (#{bundler_requirement}) failed:\n#{gem_error.message}\n\nTo install the version of bundler this project requires, run `gem install bundler -v '#{bundler_requirement}'`"
+ exit 42
+ end
+
+ def activation_error_handling
+ yield
+ nil
+ rescue StandardError, LoadError => e
+ e
+ end
+end
+
+m.load_bundler!
+
+if m.invoked_as_script?
+ load Gem.bin_path("bundler", "bundle")
+end
diff --git a/client/Dockerfile b/client/Dockerfile
new file mode 100644
index 0000000000..3636a27bd3
--- /dev/null
+++ b/client/Dockerfile
@@ -0,0 +1,20 @@
+FROM node:16.20.2 AS base
+
+WORKDIR /srv/app
+COPY ./ /srv/app
+
+RUN yarn
+
+FROM base AS development
+
+ RUN yarn build:dev
+
+ EXPOSE 3010 3011 3012
+
+ CMD ["yarn", "run", "watch"]
+
+FROM base AS production
+
+ RUN yarn build:prod
+
+ CMD ["yarn", "run", "start-docker"]
\ No newline at end of file
diff --git a/client/script/build-browser-config.js b/client/script/build-browser-config.js
index 7499a803bd..bcfcb7a1b2 100644
--- a/client/script/build-browser-config.js
+++ b/client/script/build-browser-config.js
@@ -10,8 +10,11 @@ const output = compileEnv(template);
const writePath = `${paths.build}/www/`;
/* eslint-disable no-console */
-mkdirp(writePath, function writeConfig(err) {
- if (err) return console.error("Unable to mkdir at " + writePath);
- fs.writeFileSync(`${paths.build}/www/browser.config.js`, output);
-});
+mkdirp(writePath)
+ .then(ignored => {
+ fs.writeFileSync(`${paths.build}/www/browser.config.js`, output);
+ })
+ .catch(err => {
+ console.error("Unable to mkdir at " + writePath + ": " + err);
+ });
/* eslint-enable no-console */
diff --git a/client/src/config/environment/index.js b/client/src/config/environment/index.js
index db9c4e4430..c7cfcfd221 100644
--- a/client/src/config/environment/index.js
+++ b/client/src/config/environment/index.js
@@ -1,6 +1,6 @@
const isServer = typeof __SERVER__ === "undefined" || __SERVER__;
const isBrowser = !isServer;
-const name = process.env.NODE_ENV.toLowerCase() || "development";
+const name = process.env.NODE_ENV?.toLowerCase() || "development";
const skipSSR = process.env.SKIP_SSR || false;
const baseConfig = {
diff --git a/client/src/servers/common/ProxyHelper.js b/client/src/servers/common/ProxyHelper.js
index cc2c798f37..72eb745ad4 100644
--- a/client/src/servers/common/ProxyHelper.js
+++ b/client/src/servers/common/ProxyHelper.js
@@ -5,6 +5,8 @@ import isRegExp from "lodash/isRegExp";
import serveStatic from "serve-static";
import path from "path";
+const proxyDebug = process.env.PROXY_DEBUG === "true";
+
class ProxyHelper {
constructor(name) {
this.name = name;
@@ -13,6 +15,43 @@ class ProxyHelper {
this.wwwTarget = path.join(__dirname, "..", "www");
}
+ proxyOptions(proxyPath, target, logLevel) {
+ const options = {
+ target,
+ logLevel: proxyDebug ? logLevel : "silent",
+ changeOrigin: true,
+ onError: (err, req, ignored) => {
+ ch.error(
+ `[Proxy Error] ${this.name} | ${proxyPath} -> ${target} | ${req.method} ${req.url}`
+ );
+ ch.error(`[Proxy Error] ${err.message}`);
+ ch.error(err.stack);
+ }
+ };
+
+ if (proxyDebug) {
+ options.onProxyReq = (proxyReq, req) => {
+ const clientIp =
+ req.headers["x-forwarded-for"] || req.connection.remoteAddress;
+ ch.info(
+ `[Proxy Req] ${this.name} | ${req.method} ${req.url} -> ${target}${req.url} | IP: ${clientIp}`
+ );
+ };
+ options.onProxyRes = (proxyRes, req) => {
+ ch.info(
+ `[Proxy Res] ${this.name} | ${req.method} ${req.url} | Status: ${proxyRes.statusCode}`
+ );
+ if (proxyRes.headers.location) {
+ ch.info(
+ `[Proxy Res] Redirect Location: ${proxyRes.headers.location}`
+ );
+ }
+ };
+ }
+
+ return options;
+ }
+
proxyAPIPaths(app) {
this.defineProxy(app, "/system", this.apiAssetTarget);
this.defineProxy(app, "/api/proxy", this.apiAssetTarget);
@@ -49,17 +88,17 @@ class ProxyHelper {
app.use(proxyPath, serveStatic(target, serveStaticOptions));
}
- defineProxy(app, proxyPath, target, logLevel = "silent") {
+ defineProxy(app, proxyPath, target, logLevel = "debug") {
if (isRegExp(proxyPath))
return this.defineRegExpProxy(app, proxyPath, target, logLevel);
ch.background(
`${this.name} server will proxy ${proxyPath} requests to ${target}.`
);
- app.use(proxyPath, proxy({ target, logLevel }));
+ app.use(proxyPath, proxy(this.proxyOptions(proxyPath, target, logLevel)));
}
- defineRegExpProxy(app, proxyPath, target, logLevel = "silent") {
- const theProxy = proxy({ target, logLevel });
+ defineRegExpProxy(app, proxyPath, target, logLevel = "debug") {
+ const theProxy = proxy(this.proxyOptions(proxyPath, target, logLevel));
ch.background(
`${
this.name
diff --git a/client/yarn.lock b/client/yarn.lock
index 405b47501c..f45cafdc02 100644
--- a/client/yarn.lock
+++ b/client/yarn.lock
@@ -4889,13 +4889,14 @@ foreground-child@^3.1.0:
signal-exit "^4.0.1"
form-data@^4.0.0:
- version "4.0.2"
- resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.2.tgz#35cabbdd30c3ce73deb2c42d3c8d3ed9ca51794c"
- integrity sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==
+ version "4.0.4"
+ resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.4.tgz#784cdcce0669a9d68e94d11ac4eea98088edd2c4"
+ integrity sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==
dependencies:
asynckit "^0.4.0"
combined-stream "^1.0.8"
es-set-tostringtag "^2.1.0"
+ hasown "^2.0.2"
mime-types "^2.1.12"
format@^0.2.0:
@@ -5504,7 +5505,7 @@ inflight@^1.0.4:
once "^1.3.0"
wrappy "1"
-inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.1, inherits@~2.0.3, inherits@~2.0.4:
+inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.1, inherits@~2.0.3, inherits@~2.0.4:
version "2.0.4"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
@@ -8598,7 +8599,7 @@ safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1:
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
-safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@~5.2.0:
+safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.1.0, safe-buffer@^5.2.1, safe-buffer@~5.2.0:
version "5.2.1"
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6"
integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
@@ -8794,9 +8795,9 @@ setprototypeof@1.2.0:
integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==
sha.js@^2.4.11:
- version "2.4.11"
- resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7"
- integrity sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==
+ version "2.4.12"
+ resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.12.tgz#eb8b568bf383dfd1867a32c3f2b74eb52bdbf23f"
+ integrity sha512-8LzC5+bvI45BjpfXU8V5fdU2mfeKiQe1D1gIMn7XUlF3OTUrpdJpPPH4EMAnF0DsHHdSZqCdSss5qCmJKuiO3w==
dependencies:
inherits "^2.0.1"
safe-buffer "^5.0.1"
@@ -9576,6 +9577,15 @@ tmp@^0.0.33:
dependencies:
os-tmpdir "~1.0.2"
+to-buffer@^1.2.0:
+ version "1.2.1"
+ resolved "https://registry.yarnpkg.com/to-buffer/-/to-buffer-1.2.1.tgz#2ce650cdb262e9112a18e65dc29dcb513c8155e0"
+ integrity sha512-tB82LpAIWjhLYbqjx3X4zEeHN6M8CiuOEy2JY8SEQVdYRe3CCHOFaqrBW1doLDrfpWhplcW7BL+bO3/6S3pcDQ==
+ dependencies:
+ isarray "^2.0.5"
+ safe-buffer "^5.2.1"
+ typed-array-buffer "^1.0.3"
+
to-camel-case@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/to-camel-case/-/to-camel-case-1.0.0.tgz#1a56054b2f9d696298ce66a60897322b6f423e46"
diff --git a/docker-compose.yml b/docker-compose.yml
index 5735ea4991..c77e9d9b3a 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -22,40 +22,6 @@ services:
interval: 10s
timeout: 5s
retries: 5
- redis:
- image: bitnami/redis:6.2.7-debian-10-r34
- platform: linux/amd64
- environment:
- - "ALLOW_EMPTY_PASSWORD=yes"
- logging:
- driver: json-file
- options:
- max-size: "10m"
- max-file: "10"
- restart: unless-stopped
- volumes:
- - redis-data:/data
- healthcheck:
- test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
- interval: 30s
- timeout: 5s
- retries: 5
- test-redis:
- image: bitnami/redis:6.2.7-debian-10-r34
- platform: linux/amd64
- environment:
- - "ALLOW_EMPTY_PASSWORD=yes"
- logging:
- driver: json-file
- options:
- max-size: "10m"
- max-file: "10"
- restart: unless-stopped
- healthcheck:
- test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
- interval: 30s
- timeout: 5s
- retries: 5
migrations:
build:
context: api
@@ -64,8 +30,6 @@ services:
depends_on:
postgres:
condition: service_healthy
- redis:
- condition: service_healthy
env_file: docker/manifold.env
logging:
driver: json-file
@@ -113,12 +77,27 @@ services:
restart: "no"
volumes:
- ./docker/minio/client:/root/.mc
+ client:
+ tty: true
+ stdin_open: true
+ build:
+ context: client
+ target: development
+ ports:
+ - "13100:13100" # Rescue
+ - "13101:13101" # SSR
+ - "13102:13102" # Webpack dev server
+ env_file:
+ - ./docker/local.env
+ restart: unless-stopped
+ volumes:
+ - ./client:/srv/app
+ - node_modules:/srv/app/node_modules
web:
tty: true
stdin_open: true
build:
context: api
- dockerfile: docker/development/Dockerfile
depends_on:
postgres:
condition: service_healthy
@@ -126,8 +105,6 @@ services:
condition: service_completed_successfully
minio-client:
condition: service_completed_successfully
- redis:
- condition: service_healthy
env_file:
- ./docker/manifold.env
environment:
@@ -156,43 +133,13 @@ services:
worker:
build:
context: api
- dockerfile: docker/development/Dockerfile
- command: "bin/sidekiq"
+ command: bin/good_job start --probe-port 7001
depends_on:
postgres:
condition: service_healthy
migrations:
condition: service_completed_successfully
- minio-client:
- condition: service_completed_successfully
- redis:
- condition: service_healthy
- env_file:
- - ./docker/manifold.env
- logging:
- driver: json-file
- options:
- max-size: "10m"
- max-file: "10"
- restart: unless-stopped
- volumes:
- - ./api/:/srv/app
- - bundle-cache:/bundle
- - rails-data:/srv/app/data
- - rails-log:/srv/app/log
- - rails-tmp:/srv/app/tmp
- - uploads:/srv/app/public/system
- clock:
- build:
- context: api
- dockerfile: docker/development/Dockerfile
- command: bin/zhong zhong.rb
- depends_on:
- postgres:
- condition: service_healthy
- migrations:
- condition: service_completed_successfully
- redis:
+ web:
condition: service_healthy
env_file:
- ./docker/manifold.env
@@ -203,32 +150,35 @@ services:
max-file: "10"
restart: unless-stopped
volumes:
- - ./api/public/system/:/srv/app/public/system
- ./api/:/srv/app
- bundle-cache:/bundle
- rails-data:/srv/app/data
- rails-log:/srv/app/log
- rails-tmp:/srv/app/tmp
- uploads:/srv/app/public/system
+ healthcheck:
+ test: ["CMD", "curl", "-f", "-s", "-o", "/dev/null", "http://localhost:7001/status/started"]
+ interval: 10s
+ timeout: 10s
+ retries: 3
+ start_period: 15s
spec:
build:
context: api
- dockerfile: docker/development/Dockerfile
+
command: tail -f /dev/null
depends_on:
postgres:
condition: service_healthy
migrations:
condition: service_completed_successfully
- test-redis:
- condition: service_healthy
env_file:
- ./docker/manifold.env
environment:
- RACK_ENV=test
- RAILS_ENV=test
- - RAILS_REDIS_URL=redis://test-redis:6379
- - REDIS_URL=redis://test-redis:6379
+ - DATABASE_URL=postgres://postgres:password@postgres/manifold_test
+ - CACHE_DATABASE_URL=postgres://postgres:password@postgres/manifold_cache_test
logging:
driver: json-file
options:
@@ -256,7 +206,7 @@ volumes:
driver: local
rails-tmp:
driver: local
- redis-data:
- driver: local
uploads:
driver: local
+ node_modules:
+ driver: local
\ No newline at end of file
diff --git a/docker/local.env b/docker/local.env
index f978031c15..06ea8120d8 100644
--- a/docker/local.env
+++ b/docker/local.env
@@ -15,8 +15,6 @@ CLIENT_BROWSER_API_URL=http://localhost:13110
CLIENT_BROWSER_API_CABLE_URL=http://localhost:13120
CLIENT_SERVER_API_URL=http://localhost:13110
-ELASTICSEARCH_URL=http://elasticsearch:9200
-
CLIENT_SERVER_PROXIES=true
# Dummy value for local access
diff --git a/docker/manifold.env b/docker/manifold.env
index 9b77854204..63b6304c60 100644
--- a/docker/manifold.env
+++ b/docker/manifold.env
@@ -1,5 +1,6 @@
##########################################################################################
# Manifold Service Configuration
+# For Development Only
##########################################################################################
DOMAIN=127.0.0.1:13100
@@ -19,13 +20,9 @@ CLIENT_SERVER_PROXIES=true
DISABLE_SPRING=always
RAILS_ENV=development
RAILS_SECRET_KEY=6234a9eada2709680e0db091d48fe7973f6eb23f413d9b5c2b9d17149c9e38e7309a897b6a5231297b89ac6d3c7494d40c7d6454f342c04f8743482f610016aa
-RAILS_DB_USER=postgres
-RAILS_DB_PASS=password
-RAILS_DB_HOST=postgres
-RAILS_DB_PORT=5432
-RAILS_DB_NAME=manifold_development
-RAILS_REDIS_URL=redis://redis:6379
-REDIS_URL=redis://redis:6379
+
+DATABASE_URL="postgres://postgres:password@postgres/manifold_development"
+CACHE_DATABASE_URL="postgres://postgres:password@postgres/manifold_cache_development"
SERVER_PORT=4000
@@ -40,4 +37,4 @@ S3_ACCESS_KEY_ID=minio
S3_SECRET_ACCESS_KEY=minio123
UPLOAD_BUCKET=manifold-storage
-MANIFOLD_SETTINGS_STORAGE_MIRROR=s3
+RAILS_LOG_TO_STDOUT=TRUE
\ No newline at end of file