-
Notifications
You must be signed in to change notification settings - Fork 10
/
Copy pathtoy-cluster.sh
executable file
·74 lines (62 loc) · 2.49 KB
/
toy-cluster.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
#!/bin/bash
# toy-cluster.sh — manage a toy Spark/Hadoop cluster of Docker containers
# (one master node plus three workers) on a user-defined bridge network.
# Usage: toy-cluster.sh deploy|start|stop|remove|info

# Image every cluster node runs; readonly since it is a script-wide constant.
readonly imageName="jwaresolutions/big-data-cluster:0.5.0"
# Bring the services up
# Bring an already-created cluster up: start the containers, launch the
# Spark master daemon on master-node and a worker daemon on each worker,
# then print the cluster's web-UI URLs.
function startServices {
  docker start master-node worker-1 worker-2 worker-3
  # Give the containers a moment to boot before exec'ing into them.
  sleep 5
  echo ">> Starting Master and Workers ..."
  docker exec -d master-node /home/big_data/spark-cmd.sh start master-node
  for node in worker-1 worker-2 worker-3; do
    docker exec -d "$node" /home/big_data/spark-cmd.sh start
  done
  show_info
}
# Print the web-UI URLs exposed by the master node (YARN, Spark master,
# Spark history server, HDFS health). The master's address is looked up
# on the cluster_net Docker network.
function show_info {
  # local + $( ) instead of a global assigned via backticks.
  local masterIp
  masterIp=$(docker inspect -f "{{ .NetworkSettings.Networks.cluster_net.IPAddress }}" master-node)
  echo "Hadoop info @ master-node: http://$masterIp:8088/cluster"
  echo "Spark info @ master-node: http://$masterIp:8080/"
  echo "Spark applications logs @ master-node: http://$masterIp:18080/"
  echo "DFS Health @ master-node: http://$masterIp:9870/dfshealth.html"
}
# Command dispatcher. Unknown or missing commands fall through to the
# usage message below.
case "$1" in
  start)
    startServices
    exit
    ;;
  stop)
    # Stop the Spark daemons first, then the containers themselves.
    docker exec -d master-node /home/big_data/spark-cmd.sh stop master-node
    docker exec -d worker-1 /home/big_data/spark-cmd.sh stop
    docker exec -d worker-2 /home/big_data/spark-cmd.sh stop
    docker exec -d worker-3 /home/big_data/spark-cmd.sh stop
    docker stop master-node worker-1 worker-2 worker-3
    exit
    ;;
  remove)
    docker rm master-node worker-1 worker-2 worker-3
    exit
    ;;
  deploy)
    # Delete old containers built from our image, then recreate the network.
    docker container rm -f $(docker ps -a | grep "$imageName" | awk '{ print $1 }')
    docker network rm cluster_net
    docker network create --driver bridge cluster_net # create custom network
    # 1 master + 3 worker nodes
    echo ">> Starting nodes master and worker nodes ..."
    docker run -dP --network cluster_net --name master-node -h master-node -it "$imageName"
    docker run -dP --network cluster_net --name worker-1 -it -h worker-1 "$imageName"
    docker run -dP --network cluster_net --name worker-2 -it -h worker-2 "$imageName"
    docker run -dP --network cluster_net --name worker-3 -it -h worker-3 "$imageName"
    # Format the HDFS namenode on the master.
    echo ">> Formatting hdfs ..."
    # Single quotes so ${HADOOP_HOME} is expanded by the shell INSIDE the
    # container (where the image defines it), not by the host shell.
    docker exec -it master-node bash -c '${HADOOP_HOME}/bin/hdfs namenode -format'
    startServices
    exit
    ;;
  info)
    show_info
    exit
    ;;
esac

# Usage: list every supported command (original omitted remove and info).
echo "Usage: cluster.sh deploy|start|stop|remove|info"
echo " deploy - create a new Docker network, containers (a master and 3 workers) and start these last"
echo " start - start the existing containers"
echo " stop - stop the running containers"
echo " remove - remove all the created containers"
echo " info - useful URLs"