Skip to content

Commit 2bcf6ae

Browse files
authored
Merge pull request #2244 from neicnordic/feature/multiple-backends-applications-impl
Feature/multiple backends applications impl
2 parents 04ae38d + 6249a80 commit 2bcf6ae

File tree

149 files changed

+7102
-6036
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

149 files changed

+7102
-6036
lines changed

.github/integration/scripts/charts/dependencies.sh

Lines changed: 56 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -77,15 +77,58 @@ helm install --namespace default nfs-ganesha nfs-ganesha-server-and-external-pro
7777
kubectl create namespace minio
7878
kubectl apply -f .github/integration/scripts/charts/dependencies.yaml
7979

80+
81+
values_file=".github/integration/scripts/charts/values.yaml"
82+
if [ "$1" == "local" ]; then
83+
values_file=/tmp/values.yaml
84+
cp .github/integration/scripts/charts/values.yaml /tmp/values.yaml
85+
fi
86+
8087
if [ "$2" == "s3" ]; then
81-
## S3 storage backend
82-
MINIO_ACCESS="$(random-string)"
83-
export MINIO_ACCESS
84-
MINIO_SECRET="$(random-string)"
85-
export MINIO_SECRET
86-
helm install minio minio/minio \
87-
--namespace minio \
88-
--set rootUser="$MINIO_ACCESS",rootPassword="$MINIO_SECRET",persistence.enabled=false,mode=standalone,resources.requests.memory=128Mi
88+
if [ "$3" = true ] ; then
89+
# Sleep to give the cert issuer time to issue certs so we can create a secret in the format minio expects
90+
sleep 5
91+
92+
kubectl -n minio create secret generic minio-tls \
93+
--from-file=public.crt=<(kubectl -n minio get secret minio-cert -o jsonpath='{.data.tls\.crt}' | base64 -d) \
94+
--from-file=private.key=<(kubectl -n minio get secret minio-cert -o jsonpath='{.data.tls\.key}' | base64 -d)
95+
96+
## S3 storage backend
97+
MINIO_ACCESS="$(random-string)"
98+
export MINIO_ACCESS
99+
MINIO_SECRET="$(random-string)"
100+
export MINIO_SECRET
101+
helm install minio minio/minio \
102+
--namespace minio \
103+
--set tls.enabled=true,tls.certSecret=minio-tls,rootUser="$MINIO_ACCESS",rootPassword="$MINIO_SECRET",persistence.enabled=false,mode=standalone,resources.requests.memory=128Mi
104+
105+
yq -i '
106+
.global.archive.s3Url = "https://minio.minio" |
107+
.global.backupArchive.s3Url = "https://minio.minio" |
108+
.global.inbox.s3Url = "https://minio.minio" |
109+
.global.s3Inbox.url = "https://minio.minio" |
110+
.global.sync.destination.s3Url = "https://minio.minio"
111+
' "$values_file"
112+
113+
else
114+
## S3 storage backend
115+
MINIO_ACCESS="$(random-string)"
116+
export MINIO_ACCESS
117+
MINIO_SECRET="$(random-string)"
118+
export MINIO_SECRET
119+
helm install minio minio/minio \
120+
--namespace minio \
121+
--set rootUser="$MINIO_ACCESS",rootPassword="$MINIO_SECRET",persistence.enabled=false,mode=standalone,resources.requests.memory=128Mi
122+
123+
yq -i '
124+
.global.archive.s3Url = "http://minio.minio" |
125+
.global.backupArchive.s3Url = "http://minio.minio" |
126+
.global.inbox.s3Url = "http://minio.minio" |
127+
.global.s3Inbox.url = "http://minio.minio" |
128+
.global.sync.destination.s3Url = "http://minio.minio"
129+
' "$values_file"
130+
131+
fi
89132
fi
90133

91134
PGPASSWORD="$(random-string)"
@@ -97,12 +140,6 @@ export MQPASSWORD
97140
TEST_TOKEN="$(bash .github/integration/scripts/sign_jwt.sh ES256 "$dir/jwt.key")"
98141
export TEST_TOKEN
99142

100-
values_file=".github/integration/scripts/charts/values.yaml"
101-
if [ "$1" == "local" ]; then
102-
values_file=/tmp/values.yaml
103-
cp .github/integration/scripts/charts/values.yaml /tmp/values.yaml
104-
fi
105-
106143
## update values file with all credentials
107144
if [ "$2" == "federated" ]; then
108145
yq -i '.global.schemaType = "federated"' "$values_file"
@@ -116,10 +153,13 @@ yq -i '
116153
.global.broker.password = strenv(MQPASSWORD) |
117154
.global.c4gh.privateKeys[0].passphrase = strenv(C4GHPASSPHRASE) |
118155
.global.db.password = strenv(PGPASSWORD) |
156+
.global.db.admin.password = strenv(PGPASSWORD) |
119157
.global.inbox.s3AccessKey = strenv(MINIO_ACCESS) |
120158
.global.inbox.s3SecretKey = strenv(MINIO_SECRET) |
121-
.global.sync.destination.accessKey = strenv(MINIO_ACCESS) |
122-
.global.sync.destination.secretKey = strenv(MINIO_SECRET) |
159+
.global.s3Inbox.accessKey = strenv(MINIO_ACCESS) |
160+
.global.s3Inbox.secretKey = strenv(MINIO_SECRET) |
161+
.global.sync.destination.s3AccessKey = strenv(MINIO_ACCESS) |
162+
.global.sync.destination.s3SecretKey = strenv(MINIO_SECRET) |
123163
.releasetest.secrets.accessToken = strenv(TEST_TOKEN)
124164
' "$values_file"
125165

.github/integration/scripts/charts/dependencies.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ spec:
5050
dnsNames:
5151
- localhost
5252
- minio
53+
- minio.minio
5354
- minio.minio.svc
5455
- minio.minio.svc.cluster.local
5556
ipAddresses:

.github/integration/scripts/charts/values.yaml

Lines changed: 26 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -22,20 +22,33 @@ global:
2222
storageType: s3
2323
s3AccessKey: PLACEHOLDER_VALUE
2424
s3SecretKey: PLACEHOLDER_VALUE
25-
s3Url: "http://minio.minio"
25+
s3Url: PLACEHOLDER_VALUE
2626
s3Bucket: "archive"
2727
s3Port: 9000
28-
s3ReadyPath: "/minio/health/ready"
2928
existingClaim: archive-pvc
3029
backupArchive:
3130
storageType: "s3"
3231
s3AccessKey: PLACEHOLDER_VALUE
3332
s3SecretKey: PLACEHOLDER_VALUE
34-
s3Url: "http://minio.minio"
33+
s3Url: PLACEHOLDER_VALUE
3534
s3Bucket: "backup"
3635
s3Port: 9000
37-
s3ReadyPath: "/minio/health/ready"
3836
existingClaim: backup-pvc
37+
inbox:
38+
storageType: s3
39+
s3AccessKey: PLACEHOLDER_VALUE
40+
s3SecretKey: PLACEHOLDER_VALUE
41+
s3Url: PLACEHOLDER_VALUE
42+
s3Port: 9000
43+
s3Bucket: "inbox"
44+
existingClaim: inbox-pvc
45+
s3Inbox:
46+
accessKey: PLACEHOLDER_VALUE
47+
secretKey: PLACEHOLDER_VALUE
48+
url: PLACEHOLDER_VALUE
49+
port: 9000
50+
bucket: "inbox"
51+
readyPath: "/minio/health/ready"
3952
auth:
4053
jwtSecret: jwk
4154
jwtAlg: ES256
@@ -65,6 +78,9 @@ global:
6578
host: "postgres-sda-db"
6679
user: "postgres"
6780
password: PLACEHOLDER_VALUE
81+
admin:
82+
username: "postgres"
83+
password: PLACEHOLDER_VALUE
6884
doa:
6985
enabled: false
7086
download:
@@ -80,15 +96,6 @@ global:
8096
jwkPath: "/jwks"
8197
id: DfCieZLuBU
8298
secret: DfCieZLuBU
83-
inbox:
84-
storageType: s3
85-
s3AccessKey: PLACEHOLDER_VALUE
86-
s3SecretKey: PLACEHOLDER_VALUE
87-
s3Url: http://minio.minio
88-
s3Port: 9000
89-
s3Bucket: "inbox"
90-
s3ReadyPath: "/minio/health/ready"
91-
existingClaim: inbox-pvc
9299
reencrypt:
93100
host: pipeline-sda-svc-reencrypt
94101
port: 50051
@@ -99,14 +106,12 @@ global:
99106
brokerQueue: "mapping_stream"
100107
centerPrefix: "SYNC"
101108
destination:
102-
storageType: "s3"
103-
url: "http://minio.minio"
104-
port: 9000
105-
readypath: "/minio/health/ready"
106-
accessKey: PLACEHOLDER_VALUE
107-
secretKey: PLACEHOLDER_VALUE
108-
bucket: "sync"
109-
region: "us-east-1"
109+
s3Url: PLACEHOLDER_VALUE
110+
s3Port: 9000
111+
s3AccessKey: PLACEHOLDER_VALUE
112+
s3SecretKey: PLACEHOLDER_VALUE
113+
s3BucketPrefix: "sync"
114+
s3Region: "us-east-1"
110115
remote:
111116
host: "http://remote-sync"
112117
port: "8080"

.github/integration/sda/config.yaml

Lines changed: 47 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -3,15 +3,54 @@ log:
33
level: "debug"
44
api:
55
rbacFile: /rbac.json
6-
archive:
7-
type: s3
8-
url: "http://s3"
9-
port: 9000
10-
readypath: "/minio/health/ready"
11-
accessKey: "access"
12-
secretKey: "secretKey"
13-
bucket: "archive"
6+
7+
storage:
8+
archive:
9+
s3:
10+
- endpoint: "http://s3:9000"
11+
access_key: "access"
12+
secret_key: "secretKey"
13+
bucket_prefix: "archive"
14+
region: "us-east-1"
15+
disable_https: true
16+
max_buckets: 10
17+
max_objects: 3
18+
backup:
19+
s3:
20+
- endpoint: "http://s3:9000"
21+
access_key: "access"
22+
secret_key: "secretKey"
23+
bucket_prefix: "backup"
24+
region: "us-east-1"
25+
disable_https: true
26+
inbox:
27+
s3:
28+
- endpoint: "http://s3:9000"
29+
access_key: "access"
30+
secret_key: "secretKey"
31+
bucket_prefix: "inbox"
32+
region: "us-east-1"
33+
disable_https: true
34+
sync:
35+
s3:
36+
- endpoint: "http://s3:9000"
37+
access_key: "access"
38+
secret_key: "secretKey"
39+
bucket_prefix: "sync"
40+
region: "us-east-1"
41+
disable_https: true
42+
max_buckets: 10
43+
max_objects: 10
44+
45+
s3inbox:
46+
endpoint: "http://s3:9000"
47+
access_key: "access"
48+
secret_key: "secretKey"
49+
bucket: "inbox"
1450
region: "us-east-1"
51+
ready_path: "/minio/health/ready"
52+
53+
location_broker.cache_ttl: 0
1554

1655
grpc:
1756
host: "reencrypt"
@@ -33,25 +72,6 @@ auth:
3372
resignJwt:
3473
s3Inbox: "http://inbox:8000"
3574

36-
backup:
37-
type: s3
38-
url: "http://s3"
39-
port: 9000
40-
readypath: "/minio/health/ready"
41-
accessKey: "access"
42-
secretKey: "secretKey"
43-
bucket: "backup"
44-
region: "us-east-1"
45-
inbox:
46-
type: s3
47-
url: "http://s3"
48-
port: 9000
49-
readypath: "/minio/health/ready"
50-
accessKey: "access"
51-
secretKey: "secretKey"
52-
bucket: "inbox"
53-
region: "us-east-1"
54-
5575
broker:
5676
host: "rabbitmq"
5777
port: "5672"
@@ -99,15 +119,6 @@ sync:
99119
password: "pass"
100120
user: "user"
101121
centerPrefix: "SYNC"
102-
destination:
103-
type: "s3"
104-
url: "http://s3"
105-
port: 9000
106-
readypath: "/minio/health/ready"
107-
accessKey: "access"
108-
secretKey: "secretKey"
109-
bucket: "sync"
110-
region: "us-east-1"
111122
remote:
112123
host: "http://sync-api"
113124
port: "8080"

.github/integration/tests/sda/20_ingest-verify_test.sh

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,4 +80,16 @@ if [ "$key_hashes" -eq 0 ]; then
8080
exit 1
8181
fi
8282

83+
num_in_archive_1="$(psql -U postgres -h postgres -d sda -At -c "SELECT COUNT(id) FROM sda.files WHERE archive_location LIKE '%archive1'")"
84+
if [ "$num_in_archive_1" -ne 3 ]; then
85+
echo "::error::Unexpected number of files in archive bucket 1: $num_in_archive_1, expected 3"
86+
exit 1
87+
fi
88+
89+
num_in_archive_2="$(psql -U postgres -h postgres -d sda -At -c "SELECT COUNT(id) FROM sda.files WHERE archive_location LIKE '%archive2'")"
90+
if [ "$num_in_archive_2" -ne 1 ]; then
91+
echo "::error::Unexpected number of files in archive bucket 2: $num_in_archive_2, expected 1"
92+
exit 1
93+
fi
94+
8395
echo "ingestion and verification test completed successfully"

.github/integration/tests/sda/21_cancel_test.sh

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,47 @@ if [ "$(psql -U postgres -h postgres -d sda -At -c "select event from sda.file_e
6060
exit 1
6161
fi
6262

63+
64+
# check database to verify file archive location and path has been unset
65+
if [ "$(psql -U postgres -h postgres -d sda -At -c "SELECT 1 FROM sda.files WHERE id = '$CORRID' AND archive_file_path = '' AND archive_location IS NULL")" != "1" ]; then
66+
echo "canceling file failed"
67+
exit 1
68+
fi
69+
70+
cat >/shared/direct <<EOD
71+
[default]
72+
access_key=access
73+
secret_key=secretKey
74+
check_ssl_certificate = False
75+
check_ssl_hostname = False
76+
encoding = UTF-8
77+
encrypt = False
78+
guess_mime_type = True
79+
host_base = s3:9000
80+
host_bucket = s3:9000
81+
human_readable_sizes = false
82+
multipart_chunk_size_mb = 50
83+
use_https = False
84+
socket_timeout = 30
85+
EOD
86+
87+
# Verify that archived file is removed
88+
result=$(s3cmd -c direct ls s3://archive1/test_dummy.org/"$CORRID")
89+
if [ "$result" != "" ]; then
90+
echo "file with id $CORRID was not removed from archive"
91+
exit 1
92+
fi
93+
result=$(s3cmd -c direct ls s3://archive2/test_dummy.org/"$CORRID")
94+
if [ "$result" != "" ]; then
95+
echo "file with id $CORRID was not removed from archive"
96+
exit 1
97+
fi
98+
result=$(s3cmd -c direct ls s3://backup1/test_dummy.org/"$CORRID")
99+
if [ "$result" != "" ]; then
100+
echo "file with id $CORRID was not removed from backup"
101+
exit 1
102+
fi
103+
63104
# re-ingest cancelled file
64105
ingest_payload=$(
65106
jq -r -c -n \

.github/integration/tests/sda/30_backup-finalize_test.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ EOD
9595
# check DB for archive file names
9696
for file in NA12878.bam.c4gh NA12878.bai.c4gh NA12878_20k_b37.bam.c4gh NA12878_20k_b37.bai.c4gh; do
9797
archiveName=$(psql -U postgres -h postgres -d sda -At -c "SELECT archive_file_path from sda.files where submission_file_path = '$file';")
98-
size=$(s3cmd -c direct ls s3://backup/"$archiveName" | tr -s ' ' | cut -d ' ' -f 3)
98+
size=$(s3cmd -c direct ls s3://backup1/"$archiveName" | tr -s ' ' | cut -d ' ' -f 3)
9999
if [ "$size" -eq 0 ]; then
100100
echo "Failed to get size of $file from backup site"
101101
exit 1

.github/integration/tests/sda/45_sync_test.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ fi
1111
# check bucket for synced files
1212
for file in NA12878.bai NA12878_20k_b37.bai; do
1313
RETRY_TIMES=0
14-
until [ "$(s3cmd -c direct ls s3://sync/"$file")" != "" ]; do
14+
until [ "$(s3cmd -c direct ls s3://sync1/"$file")" != "" ]; do
1515
RETRY_TIMES=$((RETRY_TIMES + 1))
1616
if [ "$RETRY_TIMES" -eq 30 ]; then
1717
echo "::error::Time out while waiting for files to be synced"

0 commit comments

Comments
 (0)