Skip to content

Commit 2b9bcff

Browse files
authored
Merge pull request #108 from rjkoh/N8-KubernetesAutoScale
Add Mongo Kubernetes, fix request limits, fix init-mongo
2 parents 4552147 + 2f21637 commit 2b9bcff

File tree

6 files changed

+99
-32
lines changed

6 files changed

+99
-32
lines changed

deployKubernetes.sh

Lines changed: 23 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -4,13 +4,33 @@
44
DOCKER_USERNAME="rjkoh"
55
TAG="latest"
66

7+
echo "Applying Kubernetes deployment file for kafka..."
8+
kubectl apply -f kubernetes/kafka/kafka-deployment.yaml --validate=false
9+
echo "Deployment for kafka complete."
10+
echo "----------------------------------------"
11+
12+
echo "Applying Kubernetes deployment file for zookeeper..."
13+
kubectl apply -f kubernetes/zookeeper/zookeeper-deployment.yaml --validate=false
14+
echo "Deployment for zookeeper complete."
15+
echo "----------------------------------------"
16+
17+
echo "Applying Kubernetes deployment file for redis..."
18+
kubectl apply -f kubernetes/redis/redis-deployment.yaml --validate=false
19+
echo "Deployment for redis complete."
20+
echo "----------------------------------------"
21+
22+
echo "Applying Kubernetes deployment file for mongo..."
23+
kubectl apply -f kubernetes/mongo/mongo-deployment.yaml --validate=false
24+
echo "Deployment for mongo complete."
25+
echo "----------------------------------------"
26+
727
# Lists of services and their corresponding deployment YAML paths
828
services=("collaboration-service" "matching-service" "question-service" "user-service")
929

1030
# Loop through each service
1131
for i in "${!services[@]}"; do
1232
SERVICE="${services[$i]}"
13-
SERVICE_NAME="cs3219-ay2425s1-project-g25-${SERVICE}"
33+
SERVICE_NAME="cs3219-ay2425s1-project-g25_${SERVICE}"
1434
K8S_DEPLOYMENT_FILE="kubernetes/backend/${SERVICE}-deployment.yaml"
1535
K8S_HPA_FILE="kubernetes/hpa/${SERVICE}-hpa.yaml"
1636

@@ -46,7 +66,7 @@ done
4666

4767
# frontend
4868
SERVICE="frontend"
49-
SERVICE_NAME="cs3219-ay2425s1-project-g25-${SERVICE}"
69+
SERVICE_NAME="cs3219-ay2425s1-project-g25_${SERVICE}"
5070
K8S_DEPLOYMENT_FILE="kubernetes/frontend/${SERVICE}-deployment.yaml"
5171
FULL_IMAGE_NAME="$DOCKER_USERNAME/$SERVICE_NAME:$TAG"
5272
echo "Tagging image $SERVICE as $FULL_IMAGE_NAME..."
@@ -62,7 +82,7 @@ echo "Deployment for $SERVICE complete."
6282
echo "----------------------------------------"
6383

6484
# nginx
65-
SERVICE_NAME="cs3219-ay2425s1-project-g25-nginx"
85+
SERVICE_NAME="cs3219-ay2425s1-project-g25_nginx"
6686
K8S_DEPLOYMENT_FILE="kubernetes/backend/nginx-deployment.yaml"
6787
FULL_IMAGE_NAME="$DOCKER_USERNAME/$SERVICE_NAME:$TAG"
6888
echo "Tagging image $SERVICE as $FULL_IMAGE_NAME..."
@@ -77,19 +97,4 @@ rm "$K8S_DEPLOYMENT_FILE.bak"
7797
echo "Deployment for $SERVICE complete."
7898
echo "----------------------------------------"
7999

80-
echo "Applying Kubernetes deployment file for kafka..."
81-
kubectl apply -f kubernetes/kafka/kafka-deployment.yaml --validate=false
82-
echo "Deployment for kafka complete."
83-
echo "----------------------------------------"
84-
85-
echo "Applying Kubernetes deployment file for zookeeper..."
86-
kubectl apply -f kubernetes/zookeeper/zookeeper-deployment.yaml --validate=false
87-
echo "Deployment for zookeeper complete."
88-
echo "----------------------------------------"
89-
90-
echo "Applying Kubernetes deployment file for redis..."
91-
kubectl apply -f kubernetes/redis/redis-deployment.yaml --validate=false
92-
echo "Deployment for redis complete."
93-
echo "----------------------------------------"
94-
95100
echo "All services deployed successfully."

init-mongo.js

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ const resetAllDatabases = true;
33
const MONGO_INITDB_ROOT_USERNAME = "admin";
44
const MONGO_INITDB_ROOT_PASSWORD = "password";
55
const PEERPREP_QUESTION_INITDB_NAME = "peerprepQuestionServiceDB";
6-
// const PEERPREP_USER_INITDB_NAME = "peerprepUserServiceDB";
6+
const PEERPREP_USER_INITDB_NAME = "peerprepUserServiceDB";
77
// const PEERPREP_COLLAB_INITDB_NAME = "peerprepCollabServiceDB";
88

99
db.getSiblingDB("admin").auth(MONGO_INITDB_ROOT_USERNAME, MONGO_INITDB_ROOT_PASSWORD);
@@ -221,7 +221,7 @@ if (isUserDatabaseResetDesired) {
221221
}
222222

223223
/*---------------USER-SERVICE-DB-INIT----------------*/
224-
/*const users = [
224+
const users = [
225225
{
226226
"username": "admin",
227227
"email": "[email protected]",
@@ -248,7 +248,7 @@ users.forEach(user => {
248248
isAdmin: user.isAdmin
249249
});
250250
}
251-
});*/
251+
});
252252

253253
/*-----------------------------INDEXING-(OPTIONAL)---------------------------------*/
254254

kubernetes/README.md

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -11,11 +11,13 @@
1111
* `kubectl rollout restart deployment metrics-server -n kube-system`
1212

1313
3. Build the PeerPrep Docker containers with `docker compose build`
14-
4. Add your docker username in `deploy.sh` at root.
15-
5. To deploy on Kubernetes, run `./deploy.sh` at root.
16-
6. View deployments, pods and HPA. Ensure all services are running. `kubectl get all`
17-
7. Wait for a few minutes for kubernetes to become fully functional. It is ready when running `kubectl get hpa` does not show any `<unknown>` under `TARGETS`
18-
8. Load testing:
14+
4. Create the configmap for the nginx api-gateway: run `kubectl create configmap nginx-config --from-file=backend/api-gateway/nginx.conf` at root.
15+
5. Create the configmap for mongo: run `kubectl create configmap init-mongo-script --from-file=init-mongo.js` at root.
16+
6. Add your docker username in `deploy.sh` at root.
17+
7. To deploy on Kubernetes, run `./deploy.sh` at root.
18+
8. View deployments, pods and HPA. Ensure all services are running. `kubectl get all`
19+
9. Wait for a few minutes for kubernetes to become fully functional. It is ready when running `kubectl get hpa` does not show any `<unknown>` under `TARGETS`
20+
10. Load testing:
1921
* In a separate terminal, run the command to conduct load testing
2022
```
2123
kubectl run -i --tty load-generator --rm --image=busybox:1.28 --restart=Never -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://<service>.default.svc.cluster.local:<port>/<route>/test; done"
@@ -26,11 +28,12 @@
2628
3. Matching Service: `http://matching-service.default.svc.cluster.local:3003/api/matchtest`
2729
4. Collaboration Service: `http://collaboration-service.default.svc.cluster.local:3004/api/collab/test`
2830
* `Ctrl + C` to stop sending requests
31+
* Testing can also be done on our deployed instance, by replacing the address with `52.221.131.145`. For example, `http://52.221.131.145:3001/users/test`. However, this is unlikely to result in any scaling due to Amazon EC2 throttling EC2 API requests for each AWS account on a per-Region basis, hence limiting the load imposed on each service.
2932
30-
9. Monitor autoscaling with `kubectl get hpa <service>-hpa --watch`. This command will watch the HPA in real-time, showing changes in replica counts and metrics. It requires a few minutes for the pods to scale up and down. Replace `<service>` appropriately with:
33+
11. Monitor autoscaling with `kubectl get hpa <service>-hpa --watch`. This command will watch the HPA in real-time, showing changes in replica counts and metrics. It requires a few minutes for the pods to scale up and down. Replace `<service>` appropriately with:
3134
* user-service
3235
* question-service
3336
* matching-service
3437
* collaboration-service
3538
36-
10. `Ctrl + C` to exit. To stop and delete to prevent resource wastage: `kubectl delete all --all`
39+
12. `Ctrl + C` to exit. To stop and delete to prevent resource wastage: `kubectl delete all --all`

kubernetes/backend/matching-service-deployment.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,8 @@ spec:
2626
value: peerprep-kafka.default.svc.cluster.local
2727
resources:
2828
requests:
29-
cpu: "200m"
30-
memory: "512Mi"
29+
cpu: "100m"
30+
memory: "256Mi"
3131
limits:
3232
cpu: "500m"
3333
memory: "1Gi"

kubernetes/backend/user-service-deployment.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ spec:
2424
value: peerprep-mongodb.default.svc.cluster.local
2525
resources:
2626
requests:
27-
cpu: "100m"
28-
memory: "256Mi"
27+
cpu: "300m"
28+
memory: "768Mi"
2929
limits:
3030
cpu: "500m"
3131
memory: "1Gi"
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
apiVersion: apps/v1
2+
kind: Deployment
3+
metadata:
4+
name: peerprep-mongodb
5+
spec:
6+
replicas: 1
7+
selector:
8+
matchLabels:
9+
app: peerprep-mongodb
10+
template:
11+
metadata:
12+
labels:
13+
app: peerprep-mongodb
14+
spec:
15+
containers:
16+
- name: peerprep-mongodb
17+
image: mongo:latest
18+
ports:
19+
- containerPort: 27017
20+
env:
21+
- name: MONGO_INITDB_ROOT_USERNAME
22+
value: "admin"
23+
- name: MONGO_INITDB_ROOT_PASSWORD
24+
value: "password"
25+
volumeMounts:
26+
- name: init-script
27+
mountPath: /docker-entrypoint-initdb.d/init-mongo.js
28+
subPath: init-mongo.js
29+
volumes:
30+
- name: init-script
31+
configMap:
32+
name: init-mongo-script
33+
---
34+
apiVersion: v1
35+
kind: Service
36+
metadata:
37+
name: peerprep-mongodb
38+
spec:
39+
ports:
40+
- port: 27017
41+
targetPort: 27017
42+
selector:
43+
app: peerprep-mongodb
44+
type: ClusterIP
45+
46+
---
47+
apiVersion: v1
48+
kind: PersistentVolumeClaim
49+
metadata:
50+
name: mongodb-pvc
51+
spec:
52+
accessModes:
53+
- ReadWriteOnce
54+
resources:
55+
requests:
56+
storage: 1Gi
57+
58+
---
59+

0 commit comments

Comments
 (0)