3 files changed: +82 −22 lines changed

@@ -2,49 +2,36 @@ version: "3.6"
 services:
   # Master
   master-node:
-    image: "jwaresolutions/bigdata-cluster"
+    image: "jwaresolutions/big-data-cluster"
     restart: "always"
-    # command: bash -c "echo 'n' | /usr/local/hadoop/bin/hdfs namenode -format &&
     command: bash -c "/home/big_data/spark-cmd.sh start master-node"
     networks:
-      - spark-net
+      - cluster-net
     volumes:
       # - "./data:/home/big_data/data" # Your data
       - hdfs-master-data:/home/hadoop/data/nameNode
       - hdfs-master-checkpoint-data:/home/hadoop/data/namesecondary
-    deploy:
-      placement:
-        # set node labels using docker node update --label-add key=value <NODE ID> from swarm manager
-        constraints:
-          - node.labels.role==master
 
   # Workers
   worker:
-    image: "jwaresolutions/bigdata-cluster"
+    image: "jwaresolutions/big-data-cluster"
     restart: "always"
     command: bash -c "/home/big_data/spark-cmd.sh start"
     depends_on:
      - "master-node"
     volumes:
       - hdfs-worker-data:/home/hadoop/data/dataNode
-    deploy:
-      placement:
-        # set node labels using `docker node update --label-add key=value <NODE ID>` from swarm manager
-        constraints:
-          - node.labels.role==worker
-      # Deploy 3 containers for this service
-      replicas: 3
     networks:
-      - spark-net
+      - cluster-net
 
 volumes:
   hdfs-master-data:
   hdfs-master-checkpoint-data:
   hdfs-worker-data:
 
-# Create the spark-net network
+# Create the cluster-net network
 networks:
-  spark-net:
+  cluster-net:
     name: "cluster_net" # Useful since the '-' char is not allowed on the command line
     driver: bridge
     attachable: false # Attachable: true prevents users from connecting to the Hadoop panels
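With the swarm-only deploy/placement keys dropped from this file and the network renamed to cluster-net, this file now describes a plain single-host Compose setup. A minimal usage sketch, assuming the file above is the repository's docker-compose.yml and that docker-compose is used to run it (neither name is shown in this diff):

# Bring the master and worker services up on one host
docker-compose up -d

# The bridge network is created as "cluster_net" (no '-' in the name)
docker network inspect cluster_net

# "replicas: 3" was removed from this file, so scale workers manually if desired (assumption)
docker-compose up -d --scale worker=3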
@@ -0,0 +1,67 @@
+version: "3.6"
+services:
+  # Master
+  master-node:
+    image: "jwaresolutions/big-data-cluster"
+    command: bash -c "/home/big_data/spark-cmd.sh start master-node"
+    ports:
+      - target: 8088
+        published: 8088
+        protocol: tcp
+        mode: host
+      - target: 8080
+        published: 8080
+        protocol: tcp
+        mode: host
+      - target: 9870
+        published: 9870
+        protocol: tcp
+        mode: host
+    networks:
+      - cluster-net
+    volumes:
+      # - "./data:/home/big_data/data" # Your data
+      - hdfs-master-data:/home/hadoop/data/nameNode
+      - hdfs-master-checkpoint-data:/home/hadoop/data/namesecondary
+    deploy:
+      mode: global # Required by Docker Swarm to make published ports work with other services
+      endpoint_mode: dnsrr # Required to prevent
+      placement:
+        # Set node labels using `docker node update --label-add role=master <NODE ID>` from swarm manager
+        constraints:
+          - node.labels.role==master
+
+  # Workers
+  worker:
+    image: "jwaresolutions/big-data-cluster"
+    command: bash -c "/home/big_data/spark-cmd.sh start"
+    depends_on:
+      - "master-node"
+    volumes:
+      - hdfs-worker-data:/home/hadoop/data/dataNode
+    deploy:
+      placement:
+        # Set node labels using `docker node update --label-add role=worker <NODE ID>` from swarm manager
+        constraints:
+          - node.labels.role==worker
+      # Deploy N containers for this service
+      replicas: 3
+    networks:
+      - cluster-net
+
+volumes:
+  hdfs-master-data:
+    external:
+      name: 'hdsf_master_data_swarm'
+  hdfs-master-checkpoint-data:
+    external:
+      name: 'hdsf_master_checkpoint_data_swarm'
+  hdfs-worker-data:
+    external:
+      name: 'hdsf_worker_data_swarm'
+
+# Uses cluster-net network
+networks:
+  cluster-net:
+    external: true
+    name: cluster_net_swarm
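This new file targets Docker Swarm, so the external network and volumes it references, plus the role labels used by the placement constraints, must exist before the stack is deployed. A rough setup sketch run from a swarm manager; the file name docker-compose-swarm.yml, the stack name spark, and the overlay driver for the external network are assumptions not shown in this diff:

# Initialize the swarm (if not done yet) and label nodes for the placement constraints
docker swarm init
docker node update --label-add role=master <MASTER NODE ID>
docker node update --label-add role=worker <WORKER NODE ID>

# Create the external resources the compose file expects
# (named volumes are node-local, so create them on the nodes where the services will land)
docker network create --driver overlay cluster_net_swarm
docker volume create hdsf_master_data_swarm
docker volume create hdsf_master_checkpoint_data_swarm
docker volume create hdsf_worker_data_swarm

# Deploy the stack; ports 8088, 8080 and 9870 are published in host mode on the master node
docker stack deploy -c docker-compose-swarm.yml spark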
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-imageName="jwaresolutions/bigdata-cluster"
+imageName="jwaresolutions/big-data-cluster"
 
 # Bring the services up
 function startServices {
@@ -35,6 +35,11 @@ if [[ $1 = "stop" ]]; then
   exit
 fi
 
+if [[ $1 = "remove" ]]; then
+  docker rm master-node worker-1 worker-2 worker-3
+  exit
+fi
+
 if [[ $1 = "deploy" ]]; then
   docker container rm -f `docker ps -a | grep $imageName | awk '{ print $1 }'` # delete old containers
   docker network rm cluster_net
@@ -62,5 +67,6 @@
 echo "Usage: cluster.sh deploy|start|stop"
 echo "  deploy - create a new Docker network and containers (a master and 3 workers), then start them"
 echo "  start  - start the existing containers"
-echo "  stop - stop the running containers"
-echo "  info - useful URLs"
+echo "  stop   - stop the running containers"
+echo "  remove - remove all the created containers"
+echo "  info   - useful URLs"
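With the new remove subcommand, the script's lifecycle reads roughly as below. A usage sketch only; it assumes the script is run from the repository root and that the containers keep the master-node / worker-N names used in the remove block above:

./cluster.sh deploy   # create the cluster_net network and the containers (1 master, 3 workers), then start them
./cluster.sh stop     # stop the running containers
./cluster.sh start    # start the existing containers again
./cluster.sh remove   # new: delete master-node and worker-1..worker-3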