feat: Use S3 node store with seaweedfs #3498

Open · wants to merge 19 commits into base: master
1 change: 1 addition & 0 deletions _unit-test/create-docker-volumes-test.sh
@@ -14,6 +14,7 @@ sentry-data
sentry-kafka
sentry-postgres
sentry-redis
sentry-seaweedfs
sentry-symbolicator"

before=$(get_volumes)
43 changes: 43 additions & 0 deletions docker-compose.yml
@@ -36,6 +36,8 @@ x-sentry-defaults: &sentry_defaults
<<: *depends_on-default
smtp:
<<: *depends_on-default
seaweedfs:
<<: *depends_on-default
snuba-api:
<<: *depends_on-default
symbolicator:
@@ -141,6 +143,7 @@ services:
kafka:
<<: *restart_policy
image: "confluentinc/cp-kafka:7.6.1"
user: root
environment:
# https://docs.confluent.io/platform/current/installation/docker/config-reference.html#cp-kakfa-example
KAFKA_PROCESS_ROLES: "broker,controller"
@@ -209,6 +212,44 @@ services:
interval: 10s
timeout: 10s
retries: 30
  seaweedfs:
    image: "chrislusf/seaweedfs:3.96_large_disk"
    entrypoint: "weed"
    command: >-
      server
      -filer=true
      -filer.port=8888
      -filer.port.grpc=18888
      -filer.defaultReplicaPlacement=000
      -master=true
      -master.port=9333
      -master.port.grpc=19333
      -metricsPort=9091
      -s3=true
      -s3.port=8333
      -s3.port.grpc=18333
      -volume=true
      -volume.dir.idx=/data/idx
      -volume.index=leveldbLarge
      -volume.max=0
      -volume.preStopSeconds=8
      -volume.readMode=redirect
      -volume.port=8080
      -volume.port.grpc=18080
      -ip=127.0.0.1
      -ip.bind=0.0.0.0
      -webdav=false
    environment:
      AWS_ACCESS_KEY_ID: sentry
      AWS_SECRET_ACCESS_KEY: sentry
    volumes:
      - "sentry-seaweedfs:/data"
    healthcheck:
      test: ["CMD", "wget", "-q", "-O-", "http://seaweedfs:8080/healthz", "http://seaweedfs:9333/cluster/healthz", "http://seaweedfs:8333/healthz"]
      interval: 30s
      timeout: 20s
      retries: 5
      start_period: 60s
snuba-api:
<<: *snuba_defaults
# Kafka consumer responsible for feeding events into Clickhouse
@@ -625,6 +666,8 @@ volumes:
external: true
sentry-symbolicator:
external: true
sentry-seaweedfs:
external: true
# This volume stores JS SDK assets and the data inside this volume should
# be cleaned periodically on upgrades.
sentry-nginx-www:
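Not part of the diff: a minimal connectivity sketch for the seaweedfs service added above. It assumes boto3 is available and that it runs somewhere that can resolve the `seaweedfs` hostname (for example inside one of the containers on the same compose network); endpoint, region and credentials mirror the service definition.

import boto3

# S3 endpoint exposed by SeaweedFS on port 8333; credentials match the
# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY values set on the service.
s3 = boto3.client(
    "s3",
    endpoint_url="http://seaweedfs:8333",
    region_name="us-east-1",
    aws_access_key_id="sentry",
    aws_secret_access_key="sentry",
)

# After install/bootstrap-s3-nodestore.sh has run, this should list "nodestore".
print([bucket["Name"] for bucket in s3.list_buckets()["Buckets"]])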
1 change: 1 addition & 0 deletions install.sh
@@ -36,6 +36,7 @@ source install/ensure-relay-credentials.sh
source install/generate-secret-key.sh
source install/update-docker-images.sh
source install/build-docker-images.sh
source install/bootstrap-s3-nodestore.sh
source install/bootstrap-snuba.sh
source install/upgrade-postgres.sh
source install/ensure-correct-permissions-profiles-dir.sh
25 changes: 25 additions & 0 deletions install/bootstrap-s3-nodestore.sh
@@ -0,0 +1,25 @@
echo "${_group}Bootstrapping seaweedfs (node store)..."

$dc up --wait seaweedfs postgres
$dc exec seaweedfs apk add --no-cache s3cmd
$dc exec seaweedfs mkdir -p /data/idx/
s3cmd="$dc exec seaweedfs s3cmd"

bucket_list=$($s3cmd --access_key=sentry --secret_key=sentry --no-ssl --region=us-east-1 --host=localhost:8333 --host-bucket='localhost:8333/%(bucket)' ls)

if [[ $(echo "$bucket_list" | tail -1 | awk '{print $3}') != 's3://nodestore' ]]; then
  # Only touch if no existing nodestore config is found
  if ! grep -q "SENTRY_NODESTORE" "$SENTRY_CONFIG_PY"; then
    nodestore_config=$(sed -n '/SENTRY_NODESTORE/,/[}]/{p}' sentry/sentry.conf.example.py)
    # Instances that already have node data in Postgres keep it readable (and
    # deletable) through the S3 backend while new events go to SeaweedFS.
    if [[ $($dc exec postgres psql -qAt -U postgres -c "select exists (select * from nodestore_node limit 1)") = "t" ]]; then
      nodestore_config=$(echo -e "$nodestore_config" | sed '$s/\}/    "read_through": True,\n    "delete_through": True,\n\}/')
    fi
echo "$nodestore_config" >>$SENTRY_CONFIG_PY
fi
$dc exec seaweedfs mkdir -p /data/idx/
$s3cmd --access_key=sentry --secret_key=sentry --no-ssl --region=us-east-1 --host=localhost:8333 --host-bucket='localhost:8333/%(bucket)' mb s3://nodestore
else
echo "Node store already exists, skipping..."
fi

echo "${_endgroup}"
1 change: 1 addition & 0 deletions install/create-docker-volumes.sh
@@ -17,5 +17,6 @@ echo "Created $(create_volume sentry-kafka)."
echo "Created $(create_volume sentry-postgres)."
echo "Created $(create_volume sentry-redis)."
echo "Created $(create_volume sentry-symbolicator)."
echo "Created $(create_volume sentry-seaweedfs)."

echo "${_endgroup}"
6 changes: 4 additions & 2 deletions sentry/Dockerfile
@@ -1,13 +1,15 @@
ARG SENTRY_IMAGE
FROM ${SENTRY_IMAGE}

RUN pip install https://github.com/stayallive/sentry-nodestore-s3/archive/main.zip

COPY . /usr/src/sentry

RUN if [ -s /usr/src/sentry/enhance-image.sh ]; then \
/usr/src/sentry/enhance-image.sh; \
fi
fi

RUN if [ -s /usr/src/sentry/requirements.txt ]; then \
echo "sentry/requirements.txt is deprecated, use sentry/enhance-image.sh - see https://develop.sentry.dev/self-hosted/#enhance-sentry-image"; \
pip install -r /usr/src/sentry/requirements.txt; \
fi
fi
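Not part of the diff: since SENTRY_NODESTORE points at sentry_nodestore_s3.S3PassthroughDjangoNodeStorage (see sentry/sentry.conf.example.py below), a quick way to confirm the rebuilt image actually contains the package is to import it from a Python shell inside the Sentry container (the `web` service name is an assumption based on the stock docker-compose.yml):

# Hedged sanity check, e.g. via `docker compose run --rm web python`:
import sentry_nodestore_s3

# The dotted path used by SENTRY_NODESTORE must resolve to this class.
print(sentry_nodestore_s3.S3PassthroughDjangoNodeStorage)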
25 changes: 25 additions & 0 deletions sentry/sentry.conf.example.py
@@ -95,6 +95,31 @@ def get_internal_network():
# See https://develop.sentry.dev/self-hosted/experimental/errors-only/
SENTRY_SELF_HOSTED_ERRORS_ONLY = env("COMPOSE_PROFILES") != "feature-complete"

################
# Node Storage #
################

# Sentry uses an abstraction layer called "node storage" to store raw events.
# Previously, it used PostgreSQL as the backend, but this didn't scale for
# high-throughput environments. Read more about this in the documentation:
# https://develop.sentry.dev/backend/application-domains/nodestore/
#
# Through this setting, you can use the blob storage provided with this
# setup (SeaweedFS) or any S3-compatible API in your own infrastructure.
# Other backend implementations for node storage developed by the community
# are available in public GitHub repositories.

SENTRY_NODESTORE = "sentry_nodestore_s3.S3PassthroughDjangoNodeStorage"
SENTRY_NODESTORE_OPTIONS = {
"compression": True,
"endpoint_url": "http://seaweedfs:8333",
"bucket_path": "nodestore",
"bucket_name": "nodestore",
"region_name": "us-east-1",
"aws_access_key_id": "sentry",
"aws_secret_access_key": "sentry",
}
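
A hedged variant (not part of the diff) for the "your own S3-compatible API" case mentioned above; the option names mirror the defaults, while the endpoint, bucket and credentials are placeholders for your own infrastructure:

SENTRY_NODESTORE = "sentry_nodestore_s3.S3PassthroughDjangoNodeStorage"
SENTRY_NODESTORE_OPTIONS = {
    "compression": True,
    "endpoint_url": "https://s3.example.com",      # placeholder endpoint
    "bucket_path": "nodestore",
    "bucket_name": "my-sentry-nodestore",          # placeholder bucket
    "region_name": "us-east-1",
    "aws_access_key_id": "<access-key-id>",        # placeholder credentials
    "aws_secret_access_key": "<secret-access-key>",
}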

#########
# Redis #
#########