Commit 901f98c
Add Azure required values in helm templates

Author: Rub21 (committed)
1 parent 3418e4c · commit 901f98c

File tree (3 files changed: +107 -87 lines)

  images/replication-job/start.sh
  osm-seed/templates/replication-job-deployment.yaml
  osm-seed/values.yaml

images/replication-job/start.sh
Lines changed: 81 additions & 81 deletions

@@ -2,10 +2,10 @@

 # osmosis tuning: https://wiki.openstreetmap.org/wiki/Osmosis/Tuning,https://lists.openstreetmap.org/pipermail/talk/2012-October/064771.html
 if [ -z "$MEMORY_JAVACMD_OPTIONS" ]; then
-  echo JAVACMD_OPTIONS=\"-server\" >~/.osmosis
+  echo JAVACMD_OPTIONS=\"-server\" >~/.osmosis
 else
-  memory="${MEMORY_JAVACMD_OPTIONS//i/}"
-  echo JAVACMD_OPTIONS=\"-server -Xmx$memory\" >~/.osmosis
+  memory="${MEMORY_JAVACMD_OPTIONS//i/}"
+  echo JAVACMD_OPTIONS=\"-server -Xmx$memory\" >~/.osmosis
 fi

 workingDirectory="/mnt/data"
@@ -14,91 +14,91 @@ mkdir -p $workingDirectory
 # Check if state.txt exist in the workingDirectory,
 # in case the file does not exist locally and does not exist in the cloud the replication will start from 0
 if [ ! -f $workingDirectory/state.txt ]; then
-  echo "File $workingDirectory/state.txt does not exist"
-  ### AWS
-  if [ $CLOUDPROVIDER == "aws" ]; then
-    aws s3 ls $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt
-    if [[ $? -eq 0 ]]; then
-      echo "File exist, let's get it from $CLOUDPROVIDER - $AWS_S3_BUCKET"
-      aws s3 cp $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt
-    fi
-  fi
-
-  ### GCP
-  if [ $CLOUDPROVIDER == "gcp" ]; then
-    gsutil ls $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt
-    if [[ $? -eq 0 ]]; then
-      echo "File exist, let's get it from $CLOUDPROVIDER - $GCP_STORAGE_BUCKET"
-      gsutil cp $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt
-    fi
-  fi
-
-  ### Azure
-  if [ $CLOUDPROVIDER == "azure" ]; then
-    state_file_exists=$(az storage blob exists --container-name $AZURE_CONTAINER_NAME --name $REPLICATION_FOLDER/state.txt --query="exists")
-    if [[ $state_file_exists=="true" ]]; then
-      echo "File exist, let's get it from $CLOUDPROVIDER - $AZURE_CONTAINER_NAME"
-      az storage blob download \
-        --container-name $AZURE_CONTAINER_NAME \
-        --name $REPLICATION_FOLDER/state.txt \
-        --file $workingDirectory/state.txt --query="name"
-    fi
-  fi
-  mkdir -p $workingDirectory
+  echo "File $workingDirectory/state.txt does not exist in local storage"
+  ### AWS
+  if [ $CLOUDPROVIDER == "aws" ]; then
+    aws s3 ls $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt
+    if [[ $? -eq 0 ]]; then
+      echo "File exist, let's get it from $CLOUDPROVIDER - $AWS_S3_BUCKET"
+      aws s3 cp $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt
+    fi
+  fi
+
+  ### GCP
+  if [ $CLOUDPROVIDER == "gcp" ]; then
+    gsutil ls $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt
+    if [[ $? -eq 0 ]]; then
+      echo "File exist, let's get it from $CLOUDPROVIDER - $GCP_STORAGE_BUCKET"
+      gsutil cp $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt
+    fi
+  fi
+
+  ### Azure
+  if [ $CLOUDPROVIDER == "azure" ]; then
+    state_file_exists=$(az storage blob exists --container-name $AZURE_CONTAINER_NAME --name $REPLICATION_FOLDER/state.txt --query="exists")
+    if [[ $state_file_exists=="true" ]]; then
+      echo "File exist, let's get it from $CLOUDPROVIDER - $AZURE_CONTAINER_NAME"
+      az storage blob download \
+        --container-name $AZURE_CONTAINER_NAME \
+        --name $REPLICATION_FOLDER/state.txt \
+        --file $workingDirectory/state.txt --query="name"
+    fi
+  fi
+  mkdir -p $workingDirectory
 fi

 # Creating the replication files
 function generateReplication() {
-  osmosis -q \
-    --replicate-apidb \
-    iterations=0 \
-    minInterval=60000 \
-    maxInterval=120000 \
-    host=$POSTGRES_HOST \
-    database=$POSTGRES_DB \
-    user=$POSTGRES_USER \
-    password=$POSTGRES_PASSWORD \
-    validateSchemaVersion=no \
-    --write-replication \
-    workingDirectory=$workingDirectory &
-  while true; do
-    for local_file in $(find $workingDirectory/ -cmin -1); do
-      if [ -f "$local_file" ]; then
-
-        cloud_file=$REPLICATION_FOLDER/${local_file#*"$workingDirectory/"}
-        echo $(date +%F_%H:%M:%S)": Copy file...$local_file to $cloud_file"
-
-        ### AWS
-        if [ $CLOUDPROVIDER == "aws" ]; then
-          aws s3 cp $local_file $AWS_S3_BUCKET/$cloud_file --acl public-read
-        fi
-
-        ### GCP
-        if [ $CLOUDPROVIDER == "gcp" ]; then
-          #TODO, emable public acces
-          gsutil cp -a public-read $local_file $GCP_STORAGE_BUCKET/$cloud_file
-        fi
-
-        ### Azure
-        if [ $CLOUDPROVIDER == "azure" ]; then
-          #TODO, emable public acces
-          az storage blob upload \
-            --container-name $AZURE_CONTAINER_NAME \
-            --file $local_file \
-            --name $cloud_file \
-            --output none
-        fi
-      fi
-    done
-    sleep 15s
-  done
+  osmosis -q \
+    --replicate-apidb \
+    iterations=0 \
+    minInterval=60000 \
+    maxInterval=120000 \
+    host=$POSTGRES_HOST \
+    database=$POSTGRES_DB \
+    user=$POSTGRES_USER \
+    password=$POSTGRES_PASSWORD \
+    validateSchemaVersion=no \
+    --write-replication \
+    workingDirectory=$workingDirectory &
+  while true; do
+    for local_file in $(find $workingDirectory/ -cmin -1); do
+      if [ -f "$local_file" ]; then
+
+        cloud_file=$REPLICATION_FOLDER/${local_file#*"$workingDirectory/"}
+        echo $(date +%F_%H:%M:%S)": Copy file...$local_file to $cloud_file"
+
+        ### AWS
+        if [ $CLOUDPROVIDER == "aws" ]; then
+          aws s3 cp $local_file $AWS_S3_BUCKET/$cloud_file --acl public-read
+        fi
+
+        ### GCP
+        if [ $CLOUDPROVIDER == "gcp" ]; then
+          #TODO, emable public acces
+          gsutil cp -a public-read $local_file $GCP_STORAGE_BUCKET/$cloud_file
+        fi
+
+        ### Azure
+        if [ $CLOUDPROVIDER == "azure" ]; then
+          #TODO, emable public acces
+          az storage blob upload \
+            --container-name $AZURE_CONTAINER_NAME \
+            --file $local_file \
+            --name $cloud_file \
+            --output none
+        fi
+      fi
+    done
+    sleep 15s
+  done
 }

 # Check if Postgres is ready
 flag=true
 while "$flag" = true; do
-  pg_isready -h $POSTGRES_HOST -p 5432 >/dev/null 2>&2 || continue
-  # Change flag to false to stop ping the DB
-  flag=false
-  generateReplication
+  pg_isready -h $POSTGRES_HOST -p 5432 >/dev/null 2>&2 || continue
+  # Change flag to false to stop ping the DB
+  flag=false
+  generateReplication
 done
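
One detail in the new Azure branch worth noting: in [[ $state_file_exists=="true" ]] there are no spaces around ==, so bash evaluates a single non-empty word and the test always succeeds, which means the download is attempted even when the blob is absent. A minimal corrected sketch of that branch (not part of this commit), assuming the same AZURE_CONTAINER_NAME, REPLICATION_FOLDER and storage credentials that the replication-job deployment template below injects:

    # Hypothetical rewrite of the Azure state.txt check, not part of this commit.
    # Assumes AZURE_CONTAINER_NAME, REPLICATION_FOLDER and the Azure storage
    # credentials are exported in the environment, as the deployment template provides.
    workingDirectory="/mnt/data"
    state_file_exists=$(az storage blob exists \
      --container-name "$AZURE_CONTAINER_NAME" \
      --name "$REPLICATION_FOLDER/state.txt" \
      --query "exists")
    # With the default JSON output the query result prints as true/false;
    # lowercase the value before comparing, just to be safe.
    if [[ "${state_file_exists,,}" == "true" ]]; then
      echo "state.txt found in $AZURE_CONTAINER_NAME, downloading it"
      az storage blob download \
        --container-name "$AZURE_CONTAINER_NAME" \
        --name "$REPLICATION_FOLDER/state.txt" \
        --file "$workingDirectory/state.txt" \
        --output none
    fi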

osm-seed/templates/replication-job-deployment.yaml
Lines changed: 11 additions & 0 deletions

@@ -53,6 +53,17 @@ spec:
         {{- if eq .Values.cloudProvider "gcp" }}
         - name: GCP_STORAGE_BUCKET
           value: {{ .Values.GCP_STORAGE_BUCKET }}
+        {{- end }}
+        # In case cloudProvider=azure
+        {{- if eq .Values.cloudProvider "azure" }}
+        - name: AZURE_STORAGE_ACCOUNT
+          value: {{ .Values.AZURE_STORAGE_ACCOUNT }}
+        - name: AZURE_CONTAINER_NAME
+          value: {{ .Values.AZURE_CONTAINER_NAME }}
+        - name: AZURE_STORAGE_ACCESS_KEY
+          value: {{ .Values.AZURE_STORAGE_ACCESS_KEY }}
+        - name: AZURE_STORAGE_CONNECTION_STRING
+          value: {{ .Values.AZURE_STORAGE_CONNECTION_STRING }}
         {{- end }}
         # Memory optimization for osmosis
         {{- if .Values.replicationJob.resources.enabled }}
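
A quick way to confirm that these values actually reach the job's environment is to render the chart locally. This is only a sketch: it assumes Helm is installed, the command runs from the repository root where the osm-seed chart directory lives, and placeholder credentials are fine because nothing gets deployed:

    # Render the templates with the Azure provider selected and look for the new env vars.
    helm template ./osm-seed \
      --set cloudProvider=azure \
      --set AZURE_STORAGE_ACCOUNT=osmseed \
      --set AZURE_CONTAINER_NAME=osm-seed \
      --set AZURE_STORAGE_ACCESS_KEY=placeholder \
      --set AZURE_STORAGE_CONNECTION_STRING=placeholder \
      | grep -A1 "AZURE_"

Since AZURE_STORAGE_ACCESS_KEY and AZURE_STORAGE_CONNECTION_STRING are credentials, sourcing them from a Kubernetes Secret via valueFrom/secretKeyRef instead of a plain value is a common hardening step, though the template here passes them as plain values.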

osm-seed/values.yaml
Lines changed: 15 additions & 6 deletions

@@ -6,24 +6,33 @@
 # The version of the image group in osm-seed, get it here: https://hub.docker.com/r/developmentseed/osmseed-web/tags/
 osmSeedVersion: develop-9302179
 environment: development
-# cloudProvider is provider where you are going to deploy osm-seed, it could be: aws, gcp, minikube
+# cloudProvider is provider where you are going to deploy osm-seed, it could be: aws, gcp, azure, minikube
 cloudProvider: minikube
+
 # ====================================================================================================
 # AWS: In case you are using the cloudProvider=aws set the below variables, We are assuming the nodes has a policies access to S3
 # ====================================================================================================
 AWS_S3_BUCKET: s3://osm-seed

+# ====================================================================================================
+# GCP: In case you are using the cloudProvider=gcp set the below variables, We are assuming the nodes has a policies to upload files to cloud storage
+# ====================================================================================================
+GCP_STORAGE_BUCKET: gs://osm-seed-test
+
+# ====================================================================================================
+# AZURE: In case you are using the cloudProvider=azure set the below variables
+# ====================================================================================================
+AZURE_STORAGE_ACCOUNT: osmseed
+AZURE_CONTAINER_NAME: osm-seed
+AZURE_STORAGE_ACCESS_KEY: xyz..
+AZURE_STORAGE_CONNECTION_STRING: xyz..
+
 # ====================================================
 # AWS: Specify ARN for SSL certificate, currently assumes a single wildcard cert
 # ====================================================

 AWS_SSL_ARN: false

-# ====================================================================================================
-# GCP: In case you are using the cloudProvider=gcp set the below variables
-# ====================================================================================================
-GCP_STORAGE_BUCKET: gs://osm-seed-test
-
 # ====================================================================================================
 # Ingress variables
 # ====================================================================================================
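
The Azure placeholders above need to point at a real storage account and container before cloudProvider=azure is usable. A sketch of how those resources and the two credential values might be obtained with the Azure CLI; the resource group and location names here are made-up placeholders, not chart values:

    # Create the storage account and container referenced by the values above.
    az storage account create --name osmseed --resource-group osm-seed-rg --location eastus
    az storage container create --name osm-seed --account-name osmseed

    # Look up values for AZURE_STORAGE_ACCESS_KEY and AZURE_STORAGE_CONNECTION_STRING.
    az storage account keys list --account-name osmseed --resource-group osm-seed-rg --query "[0].value" --output tsv
    az storage account show-connection-string --name osmseed --resource-group osm-seed-rg --query connectionString --output tsv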
