7 changes: 7 additions & 0 deletions on-prem-installers/assets/gitea/values.yaml
@@ -2,6 +2,7 @@
#
# SPDX-License-Identifier: Apache-2.0


redis-cluster:
enabled: false
postgresql:
@@ -161,3 +162,9 @@ containerSecurityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true

# Use Recreate strategy to avoid queue lock issues when upgrading
# Gitea uses LevelDB for internal queues (/data/queues/) which can only be accessed by one pod at a time
deployment:
strategy:
type: Recreate
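If the chart wires `deployment.strategy` through to the rendered Deployment's `spec.strategy` (which is what the comment above implies), a quick post-upgrade check looks like the sketch below; the Deployment name `gitea` and namespace `gitea` are assumptions, adjust to the actual install.

```bash
# Assumed names: Deployment "gitea" in namespace "gitea"; adjust to the actual release.
kubectl get deployment gitea -n gitea -o jsonpath='{.spec.strategy.type}'
# Expected output: Recreate
```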
19 changes: 16 additions & 3 deletions on-prem-installers/mage/upgrade.go
@@ -63,8 +63,15 @@ func (Upgrade) rke2Cluster() error {
1.27 needs to be installed first.
TODO: Add logic to determine version hops dynamically instead of hardcoding them.
NOTE: EMF v3.0.0 uses "v1.30.10+rke2r1"
NOTE: Upgrading from 1.30.10 to 1.34.1 requires intermediate versions: 1.30 → 1.31 → 1.32 → 1.33 → 1.34
*/
for i, rke2UpgradeVersion := range []string{"v1.30.10+rke2r1", "v1.30.14+rke2r2"} {
rke2UpgradeVersions := []string{
	"v1.30.14+rke2r2", // Patch update within 1.30
	"v1.31.13+rke2r1", // Upgrade to 1.31
	"v1.32.9+rke2r1",  // Upgrade to 1.32
	"v1.33.5+rke2r1",  // Upgrade to 1.33
	"v1.34.1+rke2r1",  // Final target version
}
for i, rke2UpgradeVersion := range rke2UpgradeVersions {
// Set version in upgrade Plan and render template.
tmpl, err := template.ParseFiles(filepath.Join("rke2", "upgrade-plan.tmpl"))
if err != nil {
@@ -99,8 +106,14 @@ func (Upgrade) rke2Cluster() error {
return err
}

if i == 0 {
fmt.Printf("RKE2 upgraded to intermediate version %s, starting another upgrade...\n", rke2UpgradeVersion)
if i < len(rke2UpgradeVersions)-1 {
fmt.Printf("RKE2 upgraded to intermediate version %s, starting next upgrade...\n", rke2UpgradeVersion)
}
}

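Each hop must finish before the next Plan is rendered; a minimal way to spot-check progress from outside this target is to watch the node versions, sketched below on the assumption that the kubeconfig points at the cluster being upgraded:

```bash
# After each intermediate hop completes, every node should report the version
# from the Plan that was just applied (e.g. v1.31.13+rke2r1).
kubectl get nodes -o custom-columns='NAME:.metadata.name,VERSION:.status.nodeInfo.kubeletVersion'
```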
4 changes: 3 additions & 1 deletion on-prem-installers/onprem/onprem_upgrade.sh
@@ -803,13 +803,15 @@ sleep 10
# Restore the secret after the app is deleted but before PostgreSQL is restored
yq e 'del(.metadata.labels, .metadata.annotations, .metadata.uid, .metadata.creationTimestamp)' postgres_secret.yaml | kubectl apply -f -

sleep 30
# Wait until PostgreSQL pod is running (Re-sync)
start_time=$(date +%s)
timeout=300 # 5 minutes in seconds
set +e
while true; do
echo "Checking PostgreSQL pod status..."
podname=$(kubectl get pods -n orch-database -l app.kubernetes.io/name=postgresql -o jsonpath='{.items[0].metadata.name}')
# CloudNativePG uses cnpg.io/cluster label instead of app.kubernetes.io/name
podname=$(kubectl get pods -n orch-database -l cnpg.io/cluster=postgresql-cluster,cnpg.io/instanceRole=primary -o jsonpath='{.items[0].metadata.name}')
pod_status=$(kubectl get pods -n orch-database "$podname" -o jsonpath='{.status.phase}')
if [[ "$pod_status" == "Running" ]]; then
echo "PostgreSQL pod is Running."
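The polling loop above checks the pod phase; an equivalent (and slightly stricter, since it waits for Ready rather than just Running) one-liner against the same CloudNativePG labels would be `kubectl wait`. A sketch, not part of this change:

```bash
# Block until the CNPG primary pod reports Ready, or fail after 5 minutes.
kubectl wait pod -n orch-database \
  -l cnpg.io/cluster=postgresql-cluster,cnpg.io/instanceRole=primary \
  --for=condition=Ready --timeout=300s
```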
22 changes: 17 additions & 5 deletions on-prem-installers/onprem/upgrade_postgres.sh
@@ -75,13 +75,25 @@ delete_postgres() {
kubectl delete secret --ignore-not-found=true -n $postgres_namespace postgresql
}

get_postgres_pod() {
kubectl get pods -n orch-database -l cnpg.io/cluster=postgresql-cluster,cnpg.io/instanceRole=primary -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "postgresql-0"
}

restore_postgres() {
kubectl exec -n $postgres_namespace $podname -- /bin/bash -c "$(typeset -f disable_security); disable_security"
remote_backup_path="/tmp/${postgres_namespace}_${podname}_backup.sql"
kubectl cp "$local_backup_path" "$postgres_namespace/$podname:$remote_backup_path"
podname=$(get_postgres_pod)
# kubectl exec -n $postgres_namespace $podname -- /bin/bash -c "$(typeset -f disable_security); disable_security"
remote_backup_path="/var/lib/postgresql/data/${postgres_namespace}_${podname}_backup.sql"

kubectl cp "$local_backup_path" "$postgres_namespace/$podname:$remote_backup_path" -c postgres

echo "Restoring backup databases from pod $podname in namespace $postgres_namespace..."

kubectl exec -n $postgres_namespace $podname -- /bin/bash -c "psql -U $POSTGRES_USERNAME < $remote_backup_path "
kubectl exec -n $postgres_namespace $podname -- /bin/bash -c "$(typeset -f enable_security); enable_security"
# Get postgres password from secret
PGPASSWORD=$(kubectl get secret -n $postgres_namespace postgresql -o jsonpath='{.data.postgres-password}' | base64 -d)

# CloudNativePG doesn't need security disable/enable, just use credentials
# Use the remote backup file that was copied to the pod
kubectl exec -n $postgres_namespace "$podname" -c postgres -- env PGPASSWORD="$PGPASSWORD" psql -U $POSTGRES_USERNAME -f "$remote_backup_path"

echo "Restore completed successfully."
}
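A quick post-restore sanity check is to list the databases through the same primary pod; a sketch reusing `$podname`, `$PGPASSWORD`, and `$POSTGRES_USERNAME` as set in `restore_postgres`:

```bash
# The restored databases should show up in the listing on the CNPG primary.
kubectl exec -n "$postgres_namespace" "$podname" -c postgres -- \
  env PGPASSWORD="$PGPASSWORD" psql -U "$POSTGRES_USERNAME" -c '\l'
```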