From b1ab9b0fec4361578f86a81ece39d327e25a6445 Mon Sep 17 00:00:00 2001
From: Mario Manno
Date: Wed, 6 Nov 2024 17:37:23 +0100
Subject: [PATCH] Add benchmark suite

* add benchmarks
* add test data
* experiments use group label instead of repo-name label with fleet 0.9.11
* add toggle for metrics
* script uses stable benchmark order via --seed
* check for at least one cluster
* skip confusing report table printing as we don't use sampling
---
 .github/workflows/ci.yml | 2 +-
 .github/workflows/release-fleet.yml | 2 +-
 .golangci.json | 12 +
 benchmarks/assets/create-1-bundle/bundle.yaml | 325 +
 .../bundle.yaml | 3126 +
 .../gitrepo.yaml | 14 +
 .../create-1-gitrepo-1-bundle/gitrepo.yaml | 14 +
 .../create-1-gitrepo-50-bundle/gitrepo.yaml | 14 +
 .../assets/create-50-bundle/bundles.yaml | 16250 ++
 .../bundles.yaml | 156300 +++++++++++++++
 .../create-50-gitrepo-50-bundle/gitrepos.yaml | 850 +
 benchmarks/deploy_test.go | 133 +
 benchmarks/gitrepo_bundle_test.go | 138 +
 benchmarks/record/record.go | 339 +
 benchmarks/report/report.go | 244 +
 benchmarks/suite_test.go | 177 +
 benchmarks/targeting_test.go | 80 +
 dev/benchmarks.sh | 28 +
 go.mod | 5 +-
 go.sum | 10 +-
 20 files changed, 178055 insertions(+), 8 deletions(-)
 create mode 100644 benchmarks/assets/create-1-bundle/bundle.yaml
 create mode 100644 benchmarks/assets/create-1-bundledeployment-10-resources/bundle.yaml
 create mode 100644 benchmarks/assets/create-1-gitrepo-1-big-bundle/gitrepo.yaml
 create mode 100644 benchmarks/assets/create-1-gitrepo-1-bundle/gitrepo.yaml
 create mode 100644 benchmarks/assets/create-1-gitrepo-50-bundle/gitrepo.yaml
 create mode 100644 benchmarks/assets/create-50-bundle/bundles.yaml
 create mode 100644 benchmarks/assets/create-50-bundledeployment-500-resources/bundles.yaml
 create mode 100644 benchmarks/assets/create-50-gitrepo-50-bundle/gitrepos.yaml
 create mode 100644 benchmarks/deploy_test.go
 create mode 100644 benchmarks/gitrepo_bundle_test.go
 create mode 100644 benchmarks/record/record.go
 create mode 100644 benchmarks/report/report.go
 create mode 100644 benchmarks/suite_test.go
 create mode 100644 benchmarks/targeting_test.go
 create mode 100755 dev/benchmarks.sh

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 73636e9f00..037c7be1ce 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -37,7 +37,7 @@ jobs:
           check-latest: true
 
       - name: unit-test
-        run: go test -shuffle=on $(go list ./... | grep -v -e /e2e -e /integrationtests)
+        run: go test -shuffle=on $(go list ./... | grep -v -e /e2e -e /integrationtests -e /benchmarks)
 
       - name: Install Ginkgo CLI
         run: go install github.com/onsi/ginkgo/v2/ginkgo
diff --git a/.github/workflows/release-fleet.yml b/.github/workflows/release-fleet.yml
index 0f3e88cbc7..e11bbb10fc 100644
--- a/.github/workflows/release-fleet.yml
+++ b/.github/workflows/release-fleet.yml
@@ -62,7 +62,7 @@ jobs:
       - name: Run unit tests
         continue-on-error: ${{ contains(github.ref, 'rc') }}
-        run: go test -cover -tags=test $(go list ./...
| grep -v -e /e2e -e /integrationtests -e /benchmarks) - name: Install Ginkgo CLI run: go install github.com/onsi/ginkgo/v2/ginkgo diff --git a/.golangci.json b/.golangci.json index f77aca9034..191721cb12 100644 --- a/.golangci.json +++ b/.golangci.json @@ -78,6 +78,18 @@ "linters": [ "gosec" ] + }, + { + "path": "integrationtests", + "linters": [ + "gosec" + ] + }, + { + "path": "benchmarks", + "linters": [ + "gosec" + ] } ] } diff --git a/benchmarks/assets/create-1-bundle/bundle.yaml b/benchmarks/assets/create-1-bundle/bundle.yaml new file mode 100644 index 0000000000..2fa20a5d10 --- /dev/null +++ b/benchmarks/assets/create-1-bundle/bundle.yaml @@ -0,0 +1,325 @@ +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-1-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-1-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." 
; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. 
+ + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" diff --git a/benchmarks/assets/create-1-bundledeployment-10-resources/bundle.yaml b/benchmarks/assets/create-1-bundledeployment-10-resources/bundle.yaml new file mode 100644 index 0000000000..44ba983e0b --- /dev/null +++ b/benchmarks/assets/create-1-bundledeployment-10-resources/bundle.yaml @@ -0,0 +1,3126 @@ +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-1-bundledeployment-10-resources + name: create-1-bundledeployment-10-resources +spec: + defaultNamespace: create-1-bundledeployment-10-resources + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . 
$HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . 
$HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . 
$HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . 
$HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . 
$HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFIX:=/usr/local/hadoop} + + . $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/ + cd $HADOOP_PREFIX/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFIX}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBEXEC_DIR="$bin"/../libexec + HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBEXEC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. 
+ yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" diff --git a/benchmarks/assets/create-1-gitrepo-1-big-bundle/gitrepo.yaml b/benchmarks/assets/create-1-gitrepo-1-big-bundle/gitrepo.yaml new file mode 100644 index 0000000000..adb4d1073d --- /dev/null +++ b/benchmarks/assets/create-1-gitrepo-1-big-bundle/gitrepo.yaml @@ -0,0 +1,14 @@ +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-1-gitrepo-1-big-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-1-gitrepo-1-big-bundle + targetNamespace: bm-1-gitrepo-1-big-bundle + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" diff --git a/benchmarks/assets/create-1-gitrepo-1-bundle/gitrepo.yaml b/benchmarks/assets/create-1-gitrepo-1-bundle/gitrepo.yaml new file mode 100644 index 0000000000..c569315eea --- /dev/null +++ b/benchmarks/assets/create-1-gitrepo-1-bundle/gitrepo.yaml @@ -0,0 +1,14 @@ +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-1-gitrepo-1-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-1-gitrepo-1-bundle + targetNamespace: bm-1-gitrepo-1-bundle + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" diff --git a/benchmarks/assets/create-1-gitrepo-50-bundle/gitrepo.yaml b/benchmarks/assets/create-1-gitrepo-50-bundle/gitrepo.yaml new file mode 100644 index 0000000000..42ef0a3d39 --- /dev/null +++ b/benchmarks/assets/create-1-gitrepo-50-bundle/gitrepo.yaml @@ -0,0 +1,14 @@ +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-1-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-1-gitrepo-50-bundle + targetNamespace: bm-1-gitrepo-50-bundle + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" diff --git a/benchmarks/assets/create-50-bundle/bundles.yaml b/benchmarks/assets/create-50-bundle/bundles.yaml new file mode 100644 index 0000000000..b5cc8e8c50 --- /dev/null +++ b/benchmarks/assets/create-50-bundle/bundles.yaml @@ -0,0 +1,16250 @@ +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-1-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-1 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . 
$HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-2-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-2 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . $HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-3-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-3 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . 
$HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-4-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-4 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . $HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-5-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-5 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . 
$HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-6-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-6 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . $HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-7-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-7 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . 
$HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-8-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-8 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . $HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-9-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-9 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . 
$HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-10-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-10 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . $HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-11-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-11 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . 
+# The Bundles that follow in this file (create-11-bundle, create-12-bundle, ...) repeat
+# the 10 KB Hadoop ConfigMap shown above. Each embeds the same bootstrap.sh,
+# core-site.xml, hdfs-site.xml, mapred-site.xml, slaves, start-yarn-nm.sh,
+# start-yarn-rm.sh and yarn-site.xml entries, with the bundle index substituted into the
+# ConfigMap name (test-config-10kb-<n>) and into the generated identifiers
+# (e.g. HADOOP_PREFI<n>), carries the label
+# fleet.cattle.io/benchmark-group: create-50-bundle, and targets clusters labelled
+# fleet.cattle.io/benchmark: "true".
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-20-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-20 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . $HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-21-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-21 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . 
$HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-22-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-22 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . $HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-23-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-23 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . 
$HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-24-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-24 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . $HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-25-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-25 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . 
$HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-26-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-26 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . $HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-27-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-27 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . 
$HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-28-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-28 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . $HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-29-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-29 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . 
$HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-30-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-30 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . $HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-31-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-31 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . 
$HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-32-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-32 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . $HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-33-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-33 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . 
$HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-34-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-34 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . $HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-35-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-35 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . 
$HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-36-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-36 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . $HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-37-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-37 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . 
$HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-38-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-38 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . $HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-39-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-39 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . 
$HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-40-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-40 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . $HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-41-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-41 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . 
$HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-42-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-42 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . $HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-43-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-43 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . 
$HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-44-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-44 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . $HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-45-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-45 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . 
$HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-46-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-46 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . $HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-47-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-47 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . 
$HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-48-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-48 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . $HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-49-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-49 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . 
$HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + name: create-50-bundle + labels: + fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94 + fleet.cattle.io/benchmark-group: create-50-bundle +spec: + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-config-10kb-50 + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . $HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. 
+ yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: configmap.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" diff --git a/benchmarks/assets/create-50-bundledeployment-500-resources/bundles.yaml b/benchmarks/assets/create-50-bundledeployment-500-resources/bundles.yaml new file mode 100644 index 0000000000..634ff29be8 --- /dev/null +++ b/benchmarks/assets/create-50-bundledeployment-500-resources/bundles.yaml @@ -0,0 +1,156300 @@ +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-1 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-1 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . $HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . 
$HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . $HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . 
$HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . $HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . 
$HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . $HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . 
$HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . $HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI1:=/usr/local/hadoop} + + . 
$HADOOP_PREFI1/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI1/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI1/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI1/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI1/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI1/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI1/sbin/ + cd $HADOOP_PREFI1/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI1}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI1}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE1EC_DIR="$bin"/../libexec + HADOOP_LIBE1EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE1EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-2 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-2 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . $HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . 
$HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . $HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . 
$HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
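Editorial note, not part of the patch: in the yarn-nm branch of these bootstrap.sh scripts, node resource limits are injected into yarn-site.xml at container start by deleting the closing configuration tag with sed, appending properties through a heredoc whose values fall back to defaults via ${VAR:-default} expansion, and then re-appending the closing tag. The condensed sketch below shows that mechanism; the XML markup of the appended properties is not visible in the extracted text above, so the property lines here are an assumption based on standard Hadoop yarn-site.xml syntax.

    #!/bin/bash
    # Illustrative sketch only -- not part of this patch.
    # Rewrite yarn-site.xml in place so the node manager advertises the
    # pod's memory/CPU limits; defaults apply when MY_MEM_LIMIT or
    # MY_CPU_LIMIT is unset.
    SITE=/usr/local/hadoop/etc/hadoop/yarn-site.xml

    sed -i '/<\/configuration>/d' "$SITE"   # drop the closing tag
    cat >> "$SITE" <<EOM
    <property>
      <name>yarn.nodemanager.resource.memory-mb</name>
      <value>${MY_MEM_LIMIT:-2048}</value>
    </property>
    <property>
      <name>yarn.nodemanager.resource.cpu-vcores</name>
      <value>${MY_CPU_LIMIT:-2}</value>
    </property>
    EOM
    echo '</configuration>' >> "$SITE"      # re-close the document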
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . $HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . 
$HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
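Editorial note, not part of the patch: when these bootstrap.sh scripts are invoked with -d, they stay in the foreground by waiting until something under the log directory has been modified within the last minute, then following all log files with tail -F while an endless sleep loop keeps the process alive. The sketch below restates that tail-and-block pattern with the log directory made explicit and the progress message moved into the loop body; it is a cleaned-up illustration, not a copy of the script.

    #!/bin/bash
    # Illustrative sketch only -- not part of this patch.
    # Block until the first log file appears, then stream logs and keep
    # the foreground process running (a common container entrypoint tail).
    LOG_DIR=${LOG_DIR:-/usr/local/hadoop/logs}

    # Wait until at least one path in $LOG_DIR was modified in the last minute.
    until find "$LOG_DIR" -mmin -1 | grep -q .; do
      echo "$(date): Waiting for logs..."
      sleep 2
    done

    tail -F "$LOG_DIR"/* &              # follow every log file
    while true; do sleep 1000; done     # keep the process alive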
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . $HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . 
$HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
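Editorial note, not part of the patch: each bootstrap.sh begins by copying the rendered Hadoop config files from the ConfigMap mount into the Hadoop config directory and failing fast if any expected file is missing. The loop below is a minimal standalone restatement of that step; the file list and paths are taken from the data above, and the variable is written with the conventional HADOOP_PREFIX name, whereas the extracted data shows it as HADOOP_PREFI2.

    #!/bin/bash
    # Illustrative sketch only -- not part of this patch.
    # Copy rendered config files from the ConfigMap volume mount into
    # Hadoop's config directory; exit non-zero if any file is absent.
    : "${HADOOP_PREFIX:=/usr/local/hadoop}"   # default install prefix
    CONFIG_DIR=/tmp/hadoop-config             # ConfigMap mount point

    for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do
      if [[ -e "$CONFIG_DIR/$f" ]]; then
        cp "$CONFIG_DIR/$f" "$HADOOP_PREFIX/etc/hadoop/$f"
      else
        echo "ERROR: Could not find $f in $CONFIG_DIR" >&2
        exit 1
      fi
    done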
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . $HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI2:=/usr/local/hadoop} + + . 
$HADOOP_PREFI2/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI2/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI2/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI2/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI2/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI2/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI2/sbin/ + cd $HADOOP_PREFI2/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI2}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI2}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE2EC_DIR="$bin"/../libexec + HADOOP_LIBE2EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE2EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-3 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-3 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . $HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . 
$HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . $HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . 
$HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . $HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . 
$HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . $HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . 
$HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . $HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI3:=/usr/local/hadoop} + + . 
$HADOOP_PREFI3/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI3/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI3/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI3/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI3/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI3/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI3/sbin/ + cd $HADOOP_PREFI3/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI3}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI3}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE3EC_DIR="$bin"/../libexec + HADOOP_LIBE3EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE3EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-4 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-4 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . $HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . 
$HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . $HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . 
$HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . $HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . 
$HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . $HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . 
$HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . $HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI4:=/usr/local/hadoop} + + . 
$HADOOP_PREFI4/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI4/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI4/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI4/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI4/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI4/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI4/sbin/ + cd $HADOOP_PREFI4/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI4}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI4}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE4EC_DIR="$bin"/../libexec + HADOOP_LIBE4EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE4EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-5 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-5 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . $HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . 
$HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . $HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . 
$HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . $HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . 
$HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . $HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . 
$HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . $HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI5:=/usr/local/hadoop} + + . 
$HADOOP_PREFI5/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI5/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI5/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI5/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI5/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI5/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI5/sbin/ + cd $HADOOP_PREFI5/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI5}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI5}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE5EC_DIR="$bin"/../libexec + HADOOP_LIBE5EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE5EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-6 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-6 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . $HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . 
$HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . $HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . 
$HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . $HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . 
$HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . $HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . 
$HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . $HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI6:=/usr/local/hadoop} + + . 
$HADOOP_PREFI6/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI6/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI6/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI6/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI6/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI6/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI6/sbin/ + cd $HADOOP_PREFI6/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI6}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI6}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE6EC_DIR="$bin"/../libexec + HADOOP_LIBE6EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE6EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-7 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-7 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . $HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . 
$HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . $HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . 
$HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . $HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . 
$HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . $HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . 
$HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . $HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI7:=/usr/local/hadoop} + + . 
$HADOOP_PREFI7/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI7/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI7/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI7/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI7/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI7/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI7/sbin/ + cd $HADOOP_PREFI7/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI7}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI7}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE7EC_DIR="$bin"/../libexec + HADOOP_LIBE7EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE7EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-8 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-8 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . $HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . 
$HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . $HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . 
$HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . $HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . 
$HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . $HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . 
$HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . $HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI8:=/usr/local/hadoop} + + . 
$HADOOP_PREFI8/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI8/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI8/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI8/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI8/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI8/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI8/sbin/ + cd $HADOOP_PREFI8/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI8}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI8}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE8EC_DIR="$bin"/../libexec + HADOOP_LIBE8EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE8EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-9 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-9 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . $HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . 
$HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . $HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . 
$HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . $HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . 
$HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . $HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . 
$HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . $HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI9:=/usr/local/hadoop} + + . 
$HADOOP_PREFI9/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI9/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI9/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI9/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI9/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI9/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI9/sbin/ + cd $HADOOP_PREFI9/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI9}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI9}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE9EC_DIR="$bin"/../libexec + HADOOP_LIBE9EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE9EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-10 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-10 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . $HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . 
$HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . $HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . 
$HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . $HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . 
$HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . $HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . 
$HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . $HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI10:=/usr/local/hadoop} + + . 
$HADOOP_PREFI10/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI10/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI10/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI10/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI10/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI10/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI10/sbin/ + cd $HADOOP_PREFI10/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI10}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI10}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE10EC_DIR="$bin"/../libexec + HADOOP_LIBE10EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE10EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-11 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-11 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . $HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . 
$HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . $HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . 
$HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . $HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . 
$HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . $HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . 
$HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . $HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI11:=/usr/local/hadoop} + + . 
$HADOOP_PREFI11/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI11/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI11/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI11/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI11/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI11/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI11/sbin/ + cd $HADOOP_PREFI11/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI11}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI11}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE11EC_DIR="$bin"/../libexec + HADOOP_LIBE11EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE11EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-12 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-12 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . $HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . 
$HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . $HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . 
$HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . $HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . 
$HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . $HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . 
$HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . $HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI12:=/usr/local/hadoop} + + . 
$HADOOP_PREFI12/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI12/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI12/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI12/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI12/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI12/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI12/sbin/ + cd $HADOOP_PREFI12/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI12}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI12}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE12EC_DIR="$bin"/../libexec + HADOOP_LIBE12EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE12EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-13 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-13 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . $HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . 
$HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . $HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . 
$HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . $HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . 
$HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . $HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . 
$HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . $HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI13:=/usr/local/hadoop} + + . 
$HADOOP_PREFI13/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI13/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI13/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI13/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI13/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI13/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI13/sbin/ + cd $HADOOP_PREFI13/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI13}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI13}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE13EC_DIR="$bin"/../libexec + HADOOP_LIBE13EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE13EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-14 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-14 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . $HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . 
$HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . $HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . 
$HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . $HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . 
$HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . $HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . 
$HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . $HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI14:=/usr/local/hadoop} + + . 
$HADOOP_PREFI14/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI14/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI14/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI14/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI14/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI14/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI14/sbin/ + cd $HADOOP_PREFI14/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI14}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI14}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE14EC_DIR="$bin"/../libexec + HADOOP_LIBE14EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE14EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-15 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-15 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . $HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . 
$HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . $HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . 
$HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . $HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . 
$HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . $HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . 
$HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . $HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI15:=/usr/local/hadoop} + + . 
$HADOOP_PREFI15/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI15/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI15/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI15/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI15/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI15/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI15/sbin/ + cd $HADOOP_PREFI15/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI15}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI15}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE15EC_DIR="$bin"/../libexec + HADOOP_LIBE15EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE15EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-16 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-16 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . $HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . 
$HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . $HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . 
$HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . $HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . 
$HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . $HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . 
$HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . $HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI16:=/usr/local/hadoop} + + . 
$HADOOP_PREFI16/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI16/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI16/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI16/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI16/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI16/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI16/sbin/ + cd $HADOOP_PREFI16/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI16}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI16}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE16EC_DIR="$bin"/../libexec + HADOOP_LIBE16EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE16EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-17 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-17 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . $HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . 
$HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . $HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . 
$HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . $HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . 
$HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . $HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . 
$HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . $HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI17:=/usr/local/hadoop} + + . 
$HADOOP_PREFI17/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI17/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI17/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI17/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI17/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI17/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI17/sbin/ + cd $HADOOP_PREFI17/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI17}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI17}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE17EC_DIR="$bin"/../libexec + HADOOP_LIBE17EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE17EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-18 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-18 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . $HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . 
$HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . $HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . 
$HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . $HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . 
$HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . $HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . 
$HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . $HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI18:=/usr/local/hadoop} + + . 
$HADOOP_PREFI18/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI18/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI18/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI18/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI18/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI18/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI18/sbin/ + cd $HADOOP_PREFI18/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI18}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI18}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE18EC_DIR="$bin"/../libexec + HADOOP_LIBE18EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE18EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-19 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-19 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . $HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . 
$HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . $HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . 
$HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . $HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . 
$HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . $HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . 
$HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . $HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI19:=/usr/local/hadoop} + + . 
$HADOOP_PREFI19/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI19/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI19/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI19/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI19/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI19/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI19/sbin/ + cd $HADOOP_PREFI19/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI19}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI19}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE19EC_DIR="$bin"/../libexec + HADOOP_LIBE19EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE19EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-20 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-20 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . $HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . 
$HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . $HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . 
$HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . $HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . 
$HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . $HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . 
$HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . $HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI20:=/usr/local/hadoop} + + . 
$HADOOP_PREFI20/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI20/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI20/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI20/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI20/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI20/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI20/sbin/ + cd $HADOOP_PREFI20/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI20}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI20}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE20EC_DIR="$bin"/../libexec + HADOOP_LIBE20EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE20EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-21 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-21 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . $HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . 
$HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . $HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . 
$HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . $HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . 
$HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . $HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . 
$HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . $HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI21:=/usr/local/hadoop} + + . 
$HADOOP_PREFI21/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI21/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI21/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI21/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI21/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI21/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI21/sbin/ + cd $HADOOP_PREFI21/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI21}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI21}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE21EC_DIR="$bin"/../libexec + HADOOP_LIBE21EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE21EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-22 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-22 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . $HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . 
$HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . $HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . 
$HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . $HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . 
$HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . $HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . 
$HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . $HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI22:=/usr/local/hadoop} + + . 
$HADOOP_PREFI22/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI22/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI22/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI22/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI22/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI22/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI22/sbin/ + cd $HADOOP_PREFI22/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI22}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI22}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE22EC_DIR="$bin"/../libexec + HADOOP_LIBE22EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE22EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-23 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-23 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . $HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . 
$HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . $HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . 
$HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . $HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . 
$HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . $HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . 
$HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . $HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI23:=/usr/local/hadoop} + + . 
$HADOOP_PREFI23/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI23/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI23/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI23/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI23/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI23/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI23/sbin/ + cd $HADOOP_PREFI23/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI23}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI23}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE23EC_DIR="$bin"/../libexec + HADOOP_LIBE23EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE23EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-24 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-24 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . $HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . 
$HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . $HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . 
$HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . $HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . 
$HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . $HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . 
$HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . $HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI24:=/usr/local/hadoop} + + . 
$HADOOP_PREFI24/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI24/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI24/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI24/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI24/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI24/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI24/sbin/ + cd $HADOOP_PREFI24/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI24}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI24}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE24EC_DIR="$bin"/../libexec + HADOOP_LIBE24EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE24EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-25 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-25 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . $HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . 
$HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . $HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . 
$HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . $HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . 
$HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . $HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . 
$HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . $HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI25:=/usr/local/hadoop} + + . 
$HADOOP_PREFI25/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI25/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI25/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI25/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI25/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI25/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI25/sbin/ + cd $HADOOP_PREFI25/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI25}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI25}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE25EC_DIR="$bin"/../libexec + HADOOP_LIBE25EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE25EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-26 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-26 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . $HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . 
$HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . $HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . 
$HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . $HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . 
$HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . $HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . 
$HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . $HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI26:=/usr/local/hadoop} + + . 
$HADOOP_PREFI26/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI26/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI26/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI26/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI26/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI26/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI26/sbin/ + cd $HADOOP_PREFI26/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI26}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI26}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE26EC_DIR="$bin"/../libexec + HADOOP_LIBE26EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE26EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-27 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-27 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . $HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . 
$HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . $HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . 
$HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . $HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . 
$HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . $HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . 
$HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . $HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI27:=/usr/local/hadoop} + + . 
$HADOOP_PREFI27/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI27/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI27/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI27/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI27/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI27/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI27/sbin/ + cd $HADOOP_PREFI27/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI27}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI27}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE27EC_DIR="$bin"/../libexec + HADOOP_LIBE27EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE27EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-28 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-28 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . $HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . 
$HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . $HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . 
$HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . $HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . 
$HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . $HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . 
$HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . $HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI28:=/usr/local/hadoop} + + . 
$HADOOP_PREFI28/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI28/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI28/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI28/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI28/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI28/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI28/sbin/ + cd $HADOOP_PREFI28/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI28}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI28}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE28EC_DIR="$bin"/../libexec + HADOOP_LIBE28EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE28EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-29 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-29 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . $HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . 
$HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . $HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . 
$HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . $HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . 
$HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . $HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . 
$HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . $HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI29:=/usr/local/hadoop} + + . 
$HADOOP_PREFI29/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI29/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI29/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI29/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI29/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI29/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI29/sbin/ + cd $HADOOP_PREFI29/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI29}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI29}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE29EC_DIR="$bin"/../libexec + HADOOP_LIBE29EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE29EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-30 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-30 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . $HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . 
$HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . $HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . 
$HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . $HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . 
$HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . $HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . 
$HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . $HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI30:=/usr/local/hadoop} + + . 
$HADOOP_PREFI30/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI30/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI30/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI30/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI30/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI30/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI30/sbin/ + cd $HADOOP_PREFI30/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI30}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI30}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE30EC_DIR="$bin"/../libexec + HADOOP_LIBE30EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE30EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-31 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-31 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . $HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . 
$HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . $HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . 
$HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . $HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . 
$HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . $HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . 
$HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . $HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI31:=/usr/local/hadoop} + + . 
$HADOOP_PREFI31/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI31/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI31/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI31/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI31/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI31/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI31/sbin/ + cd $HADOOP_PREFI31/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI31}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI31}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE31EC_DIR="$bin"/../libexec + HADOOP_LIBE31EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE31EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-32 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-32 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . $HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . 
$HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . $HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . 
$HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . $HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . 
$HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . $HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . 
$HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . $HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI32:=/usr/local/hadoop} + + . 
$HADOOP_PREFI32/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI32/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI32/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI32/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI32/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI32/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI32/sbin/ + cd $HADOOP_PREFI32/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI32}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI32}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE32EC_DIR="$bin"/../libexec + HADOOP_LIBE32EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE32EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-33 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-33 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . $HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . 
$HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . $HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . 
$HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . $HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . 
$HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . $HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . 
$HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . $HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI33:=/usr/local/hadoop} + + . 
$HADOOP_PREFI33/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI33/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI33/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI33/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI33/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI33/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI33/sbin/ + cd $HADOOP_PREFI33/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI33}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI33}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE33EC_DIR="$bin"/../libexec + HADOOP_LIBE33EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE33EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-34 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-34 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . $HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . 
$HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . $HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . 
$HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . $HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . 
$HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . $HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . 
$HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . $HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI34:=/usr/local/hadoop} + + . 
$HADOOP_PREFI34/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI34/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI34/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI34/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI34/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI34/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI34/sbin/ + cd $HADOOP_PREFI34/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI34}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI34}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE34EC_DIR="$bin"/../libexec + HADOOP_LIBE34EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE34EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-35 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-35 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . $HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . 
$HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . $HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . 
$HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . $HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . 
$HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . $HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . 
$HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . $HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI35:=/usr/local/hadoop} + + . 
$HADOOP_PREFI35/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI35/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI35/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI35/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI35/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI35/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI35/sbin/ + cd $HADOOP_PREFI35/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI35}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI35}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE35EC_DIR="$bin"/../libexec + HADOOP_LIBE35EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE35EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-36 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-36 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . $HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . 
$HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . $HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . 
$HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . $HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . 
$HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . $HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . 
$HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . $HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI36:=/usr/local/hadoop} + + . 
$HADOOP_PREFI36/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI36/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI36/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI36/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI36/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI36/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI36/sbin/ + cd $HADOOP_PREFI36/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI36}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI36}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE36EC_DIR="$bin"/../libexec + HADOOP_LIBE36EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE36EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-37 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-37 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . $HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . 
$HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . $HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . 
$HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . $HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . 
$HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . $HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . 
$HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . $HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI37:=/usr/local/hadoop} + + . 
$HADOOP_PREFI37/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI37/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI37/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI37/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI37/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI37/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI37/sbin/ + cd $HADOOP_PREFI37/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI37}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI37}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE37EC_DIR="$bin"/../libexec + HADOOP_LIBE37EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE37EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-38 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-38 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . $HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . 
$HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . $HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . 
$HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . $HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . 
$HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . $HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . 
$HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . $HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI38:=/usr/local/hadoop} + + . 
$HADOOP_PREFI38/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI38/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI38/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI38/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI38/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI38/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI38/sbin/ + cd $HADOOP_PREFI38/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI38}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI38}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE38EC_DIR="$bin"/../libexec + HADOOP_LIBE38EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE38EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-39 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-39 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . $HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . 
$HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . $HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . 
$HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . $HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . 
$HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . $HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . 
$HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . $HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI39:=/usr/local/hadoop} + + . 
$HADOOP_PREFI39/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI39/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI39/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI39/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI39/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI39/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI39/sbin/ + cd $HADOOP_PREFI39/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI39}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI39}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE39EC_DIR="$bin"/../libexec + HADOOP_LIBE39EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE39EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-40 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-40 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . $HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . 
$HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . $HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . 
$HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . $HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . 
$HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . $HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . 
$HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . $HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI40:=/usr/local/hadoop} + + . 
$HADOOP_PREFI40/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI40/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI40/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI40/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI40/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI40/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI40/sbin/ + cd $HADOOP_PREFI40/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI40}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI40}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE40EC_DIR="$bin"/../libexec + HADOOP_LIBE40EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE40EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-41 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-41 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . $HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . 
$HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . $HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . 
$HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . $HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . 
$HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . $HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . 
$HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . $HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI41:=/usr/local/hadoop} + + . 
$HADOOP_PREFI41/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI41/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI41/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI41/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI41/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI41/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI41/sbin/ + cd $HADOOP_PREFI41/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI41}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI41}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE41EC_DIR="$bin"/../libexec + HADOOP_LIBE41EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE41EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-42 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-42 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . $HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . 
$HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . $HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . 
$HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . $HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . 
$HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . $HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . 
$HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . $HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI42:=/usr/local/hadoop} + + . 
$HADOOP_PREFI42/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI42/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI42/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI42/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI42/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI42/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI42/sbin/ + cd $HADOOP_PREFI42/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI42}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI42}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE42EC_DIR="$bin"/../libexec + HADOOP_LIBE42EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE42EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-43 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-43 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . $HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . 
$HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . $HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . 
$HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . $HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . 
$HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . $HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . 
$HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . $HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI43:=/usr/local/hadoop} + + . 
$HADOOP_PREFI43/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI43/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI43/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI43/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI43/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI43/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI43/sbin/ + cd $HADOOP_PREFI43/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI43}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI43}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE43EC_DIR="$bin"/../libexec + HADOOP_LIBE43EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE43EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-44 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-44 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . $HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . 
$HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . $HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . 
$HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . $HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . 
$HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . $HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . 
$HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . $HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI44:=/usr/local/hadoop} + + . 
$HADOOP_PREFI44/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI44/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI44/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI44/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI44/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI44/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI44/sbin/ + cd $HADOOP_PREFI44/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI44}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI44}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE44EC_DIR="$bin"/../libexec + HADOOP_LIBE44EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE44EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-45 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-45 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . $HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . 
$HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . $HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . 
$HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . $HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . 
$HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . $HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . 
$HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . $HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI45:=/usr/local/hadoop} + + . 
$HADOOP_PREFI45/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI45/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI45/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI45/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI45/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI45/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI45/sbin/ + cd $HADOOP_PREFI45/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI45}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI45}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE45EC_DIR="$bin"/../libexec + HADOOP_LIBE45EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE45EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-46 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-46 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . $HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . 
$HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . $HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . 
$HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . $HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . 
$HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . $HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . 
$HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . $HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI46:=/usr/local/hadoop} + + . 
$HADOOP_PREFI46/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI46/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI46/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI46/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI46/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI46/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI46/sbin/ + cd $HADOOP_PREFI46/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI46}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI46}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE46EC_DIR="$bin"/../libexec + HADOOP_LIBE46EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE46EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-47 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-47 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . $HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . 
$HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . $HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . 
$HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . $HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . 
$HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . $HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . 
$HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . $HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI47:=/usr/local/hadoop} + + . 
$HADOOP_PREFI47/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI47/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI47/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI47/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI47/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI47/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI47/sbin/ + cd $HADOOP_PREFI47/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI47}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI47}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE47EC_DIR="$bin"/../libexec + HADOOP_LIBE47EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE47EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-48 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-48 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . $HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . 
$HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . $HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . 
$HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . $HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . 
$HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . $HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . 
$HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . $HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI48:=/usr/local/hadoop} + + . 
$HADOOP_PREFI48/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI48/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI48/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI48/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI48/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI48/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI48/sbin/ + cd $HADOOP_PREFI48/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI48}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI48}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE48EC_DIR="$bin"/../libexec + HADOOP_LIBE48EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE48EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-49 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-49 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . $HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . 
$HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . $HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . 
$HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . $HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . 
$HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . $HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . 
$HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . $HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI49:=/usr/local/hadoop} + + . 
$HADOOP_PREFI49/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI49/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI49/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI49/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI49/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI49/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI49/sbin/ + cd $HADOOP_PREFI49/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI49}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI49}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE49EC_DIR="$bin"/../libexec + HADOOP_LIBE49EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE49EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: Bundle +metadata: + labels: + fleet.cattle.io/commit: 047f12a5eb8e5552f2cb721b95d8f51288e01738 + fleet.cattle.io/benchmark-group: create-50-bundledeployment-500-resources + name: create-50-bundledeployment-500-resources-part-50 +spec: + defaultNamespace: create-50-bundledeployment-500-resources-part-50 + paused: true + resources: + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eighteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . $HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eighteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-eleven + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . 
$HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: eleven/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fifteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . $HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fifteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-fourteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . 
$HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: fourteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-nineteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . $HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: nineteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-seventeen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . 
$HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: seventeen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-sixteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . $HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: sixteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-thirteen + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . 
$HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: thirteen/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twelve + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . $HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." 
&& exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. + + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twelve/cm.yaml + - content: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: bm-twenty + data: + bootstrap.sh: | + #!/bin/bash + + : ${HADOOP_PREFI50:=/usr/local/hadoop} + + . 
$HADOOP_PREFI50/etc/hadoop/hadoop-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hadoop-config" + + # Copy config files from volume mount + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_PREFI50/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # installing libraries if any - (resource urls added comma separated to the ACP system variable) + cd $HADOOP_PREFI50/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + mkdir -p /root/hdfs/namenode + $HADOOP_PREFI50/bin/hdfs namenode -format -force -nonInteractive + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start namenode + fi + + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + mkdir -p /root/hdfs/datanode + + # wait up to 30 seconds for namenode + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1 + + $HADOOP_PREFI50/sbin/hadoop-daemon.sh start datanode + fi + + if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then + cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-rm.sh + ./start-yarn-rm.sh + fi + + if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then + sed -i '/<\/configuration>/d' $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cat >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml <<- EOM + + yarn.nodemanager.resource.memory-mb + ${MY_MEM_LIMIT:-2048} + + + + yarn.nodemanager.resource.cpu-vcores + ${MY_CPU_LIMIT:-2} + + EOM + echo '' >> $HADOOP_PREFI50/etc/hadoop/yarn-site.xml + cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFI50/sbin/ + cd $HADOOP_PREFI50/sbin + chmod +x start-yarn-nm.sh + + # wait up to 30 seconds for resourcemanager + (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1 + + ./start-yarn-nm.sh + fi + + if [[ $1 == "-d" ]]; then + until find ${HADOOP_PREFI50}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done + tail -F ${HADOOP_PREFI50}/logs/* & + while true; do sleep 1000; done + fi + + if [[ $1 == "-bash" ]]; then + /bin/bash + fi + core-site.xml: | + + + + + fs.defaultFS + hdfs://foo-hadoop-hdfs-nn:9000/ + NameNode URI + + + hdfs-site.xml: | + + + + dfs.datanode.use.datanode.hostname + false + + + + dfs.client.use.datanode.hostname + false + + + + dfs.replication + 3 + + + + dfs.datanode.data.dir + file:///root/hdfs/datanode + DataNode directory + + + + dfs.namenode.name.dir + file:///root/hdfs/namenode + NameNode directory for namespace and transaction logs storage. 
+ + + + dfs.namenode.datanode.registration.ip-hostname-check + false + + + + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + + + + mapred-site.xml: | + + + + + + mapreduce.framework.name + yarn + + + mapreduce.jobhistory.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020 + + + mapreduce.jobhistory.webapp.address + foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888 + + + slaves: 'localhost + + ' + start-yarn-nm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . $HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + start-yarn-rm.sh: | + #!/usr/bin/env bash + + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + + # Start all yarn daemons. Run this on master node. + + echo "starting yarn daemons" + + bin=`dirname "${BASH_SOURCE-$0}"` + bin=`cd "$bin"; pwd` + + DEFAULT_LIBE50EC_DIR="$bin"/../libexec + HADOOP_LIBE50EC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} + . 
$HADOOP_LIBE50EC_DIR/yarn-config.sh + + # start resourceManager + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager + # start nodeManager + # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager + # start proxyserver + "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver + yarn-site.xml: | + + + + + + yarn.resourcemanager.hostname + foo-hadoop-yarn-rm + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + + + yarn.nodemanager.bind-host + 0.0.0.0 + + + yarn.timeline-service.bind-host + 0.0.0.0 + + + + + yarn.nodemanager.vmem-check-enabled + false + + + + yarn.nodemanager.aux-services + mapreduce_shuffle + + + + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler + + + + List of directories to store localized files in. + yarn.nodemanager.local-dirs + /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir + + + + Where to store container logs. + yarn.nodemanager.log-dirs + /var/log/hadoop-yarn/containers + + + + Where to aggregate logs to. + yarn.nodemanager.remote-app-log-dir + /var/log/hadoop-yarn/apps + + + + yarn.application.classpath + + /usr/local/hadoop/etc/hadoop, + /usr/local/hadoop/share/hadoop/common/*, + /usr/local/hadoop/share/hadoop/common/lib/*, + /usr/local/hadoop/share/hadoop/hdfs/*, + /usr/local/hadoop/share/hadoop/hdfs/lib/*, + /usr/local/hadoop/share/hadoop/mapreduce/*, + /usr/local/hadoop/share/hadoop/mapreduce/lib/*, + /usr/local/hadoop/share/hadoop/yarn/*, + /usr/local/hadoop/share/hadoop/yarn/lib/* + + + + name: twenty/cm.yaml + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" diff --git a/benchmarks/assets/create-50-gitrepo-50-bundle/gitrepos.yaml b/benchmarks/assets/create-50-gitrepo-50-bundle/gitrepos.yaml new file mode 100644 index 0000000000..1072229b6a --- /dev/null +++ b/benchmarks/assets/create-50-gitrepo-50-bundle/gitrepos.yaml @@ -0,0 +1,850 @@ +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-1 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-1 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-2 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-2 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-3 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-3 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-4 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: 
bm-50-gitrepo-50-bundle-part-4 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-5 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-5 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-6 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-6 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-7 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-7 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-8 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-8 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-9 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-9 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-10 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-10 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-11 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-11 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-12 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-12 + targets: + - 
clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-13 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-13 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-14 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-14 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-15 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-15 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-16 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-16 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-17 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-17 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-18 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-18 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-19 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-19 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-20 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-20 + targets: + - clusterSelector: + matchLabels: + 
fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-21 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-21 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-22 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-22 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-23 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-23 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-24 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-24 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-25 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-25 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-26 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-26 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-27 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-27 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-28 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-28 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- 
+apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-29 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-29 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-30 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-30 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-31 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-31 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-32 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-32 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-33 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-33 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-34 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-34 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-35 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-35 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-36 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-36 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: 
GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-37 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-37 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-38 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-38 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-39 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-39 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-40 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-40 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-41 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-41 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-42 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-42 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-43 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-43 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-44 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-44 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: 
bm-50-gitrepo-50-bundle-part-45 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-45 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-46 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-46 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-47 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-47 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-48 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-48 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-49 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-49 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" +--- +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: bm-50-gitrepo-50-bundle-part-50 + labels: + fleet.cattle.io/group: bm-50-gitrepo-50-bundle +spec: + repo: https://github.com/rancher/fleet-test-data + branch: benchmark-suite + paths: + - benchmarks/create-50-gitrepo-50-bundle + targetNamespace: bm-50-gitrepo-50-bundle-part-50 + targets: + - clusterSelector: + matchLabels: + fleet.cattle.io/benchmark: "true" diff --git a/benchmarks/deploy_test.go b/benchmarks/deploy_test.go new file mode 100644 index 0000000000..b1e2340c1f --- /dev/null +++ b/benchmarks/deploy_test.go @@ -0,0 +1,133 @@ +package benchmarks_test + +import ( + "github.com/rancher/fleet/benchmarks/record" + "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + + "sigs.k8s.io/controller-runtime/pkg/client" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + gm "github.com/onsi/gomega/gmeasure" +) + +// These experiments measure the time it takes to deploy a bundledeployment. +// However, bundledeployments cannot exist without bundles, so we create the +// bundles first, wait for targeting to be done and then unpause the bundles. 
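+//
+// Roughly, the unpause step that is timed below boils down to a merge patch
+// against each Bundle's spec (sketch, mirroring the experiment code further
+// down in this file):
+//
+//	orig := bundle.DeepCopy()
+//	bundle.Spec.Paused = false
+//	_ = k8sClient.Patch(ctx, bundle, client.MergeFrom(orig))
+//
+// The measured "TotalDuration" covers everything from this patch until the
+// bundle or cluster summaries report the expected number of ready
+// bundledeployments.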
+// +// create-1-bundledeployment-10-resources +// create-50-bundledeployment-500-resources +var _ = Context("Benchmarks Deploy", func() { + var ( + clusters *v1alpha1.ClusterList + n int + ) + + BeforeEach(func() { + clusters = &v1alpha1.ClusterList{} + Expect(k8sClient.List(ctx, clusters, client.InNamespace(workspace), client.MatchingLabels{ + "fleet.cattle.io/benchmark": "true", + })).To(Succeed()) + n = len(clusters.Items) + Expect(n).ToNot(BeZero(), "you need at least one cluster labeled with fleet.cattle.io/benchmark=true") + }) + + Describe("Unpausing 1 BundleDeployments results in 10 Resources", Label("create-1-bundledeployment-10-resources"), func() { + BeforeEach(func() { + name = "create-1-bundledeployment-10-resources" + info = "creating one bundledeployment, targeting each cluster" + }) + + It("creates one bundledeployment per cluster", func() { + DeferCleanup(func() { + _, _ = k.Delete("-f", assetPath(name, "bundle.yaml")) + }) + + By("preparing the paused bundles") + _, _ = k.Apply("-f", assetPath(name, "bundle.yaml")) + Eventually(func(g Gomega) { + list := &v1alpha1.BundleDeploymentList{} + err := k8sClient.List(ctx, list, client.MatchingLabels{ + GroupLabel: name, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(len(list.Items)).To(Equal(n)) + }).Should(Succeed()) + + experiment.MeasureDuration("TotalDuration", func() { + record.MemoryUsage(experiment, "MemDuring") + + // unpausing is part of the experiment, we don't want to miss reconciles + bundle := &v1alpha1.Bundle{} + err := k8sClient.Get(ctx, client.ObjectKey{Namespace: workspace, Name: name}, bundle) + Expect(err).ToNot(HaveOccurred()) + + orig := bundle.DeepCopy() + bundle.Spec.Paused = false + patch := client.MergeFrom(orig) + err = k8sClient.Patch(ctx, bundle, patch) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func(g Gomega) { + bundle := &v1alpha1.Bundle{} + err := k8sClient.Get(ctx, client.ObjectKey{Namespace: workspace, Name: name}, bundle) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(bundle.Status.Summary.DesiredReady).To(Equal(n)) + g.Expect(bundle.Status.Summary.Ready).To(Equal(n)) + }).Should(Succeed()) + }, gm.Style("{{bold}}")) + }) + }) + + Describe("Unpausing 50 BundleDeployments results in 500 Resources", Label("create-50-bundledeployment-500-resources"), func() { + BeforeEach(func() { + name = "create-50-bundledeployment-500-resources" + info = "creating one bundledeployment, targeting each cluster" + }) + + It("creates one bundledeployment", func() { + DeferCleanup(func() { + _, _ = k.Delete("-f", assetPath(name, "bundles.yaml")) + }) + + By("preparing the paused bundles") + _, _ = k.Apply("-f", assetPath(name, "bundles.yaml")) + Eventually(func(g Gomega) { + list := &v1alpha1.BundleDeploymentList{} + err := k8sClient.List(ctx, list, client.MatchingLabels{ + GroupLabel: name, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(len(list.Items)).To(Equal(n * 50)) + }).Should(Succeed()) + + experiment.MeasureDuration("TotalDuration", func() { + record.MemoryUsage(experiment, "MemDuring") + + list := &v1alpha1.BundleList{} + err := k8sClient.List(ctx, list, client.MatchingLabels{ + GroupLabel: name, + }) + Expect(err).ToNot(HaveOccurred()) + for _, bundle := range list.Items { + orig := bundle.DeepCopy() + bundle.Spec.Paused = false + patch := client.MergeFrom(orig) + err = k8sClient.Patch(ctx, &bundle, patch) + Expect(err).ToNot(HaveOccurred()) + } + + Eventually(func(g Gomega) { + for _, c := range clusters.Items { + cluster := &v1alpha1.Cluster{} + err := k8sClient.Get(ctx, 
client.ObjectKey{Namespace: workspace, Name: c.Name}, cluster) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.Summary.DesiredReady).To(Equal(n*50 + 1)) + // we expect the agent to be ready as well + g.Expect(cluster.Status.Summary.Ready).To(Equal(n*50 + 1)) + } + }).Should(Succeed()) + }, gm.Style("{{bold}}")) + }) + }) +}) diff --git a/benchmarks/gitrepo_bundle_test.go b/benchmarks/gitrepo_bundle_test.go new file mode 100644 index 0000000000..768e3a4902 --- /dev/null +++ b/benchmarks/gitrepo_bundle_test.go @@ -0,0 +1,138 @@ +// Package benchmarks is used to benchmark the performance of the controllers +// against an existing Fleet installation. Each experiment aligns to a bundle +// lifecycles. Experiments might have requirements, like the number of clusters +// in an installation. The experiments create a resource and wait for Fleet to +// reconcile it. Experiments collect multiple metrics, like the number and +// duration of reconciliations, the overall duration of the experiment, the +// number of created k8s resources and the CPU and memory usage of the +// controllers. +package benchmarks_test + +import ( + gm "github.com/onsi/gomega/gmeasure" + + "github.com/rancher/fleet/benchmarks/record" + "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + + "sigs.k8s.io/controller-runtime/pkg/client" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// create-1-gitrepo-1-bundle +// create-1-gitrepo-1-big-bundle +// create-1-gitrepo-50-bundle +// create-50-gitrepo-50-bundle +var _ = Context("Benchmarks GitOps", func() { + Describe("Adding 1 GitRepo results in 1 Bundle", Label("create-1-gitrepo-1-bundle"), func() { + BeforeEach(func() { + name = "create-1-gitrepo-1-bundle" + info = `creating one bundle from one gitrepo + + This test is influenced by the network connection to the Git repository server. 
+ ` + }) + + It("creates a Bundle", func() { + DeferCleanup(func() { + _, _ = k.Delete("-f", assetPath(name, "gitrepo.yaml")) + }) + + experiment.MeasureDuration("TotalDuration", func() { + record.MemoryUsage(experiment, "MemDuring") + + _, _ = k.Apply("-f", assetPath(name, "gitrepo.yaml")) + Eventually(func(g Gomega) { + err := k8sClient.Get(ctx, client.ObjectKey{ + Namespace: workspace, + Name: "bm-1-gitrepo-1-bundle-benchmarks-create-1-gitre-773b4", + }, &v1alpha1.Bundle{}) + g.Expect(err).ToNot(HaveOccurred()) + }).Should(Succeed()) + }, gm.Style("{{bold}}")) + + }) + }) + + Describe("Adding 1 GitRepo results in 1 big Bundle", Label("create-1-gitrepo-1-big-bundle"), func() { + BeforeEach(func() { + name = "create-1-gitrepo-1-big-bundle" + info = "creating one big bundle from one GitRepo" + }) + + It("creates a big bundle", func() { + DeferCleanup(func() { + _, _ = k.Delete("-f", assetPath(name, "gitrepo.yaml")) + }) + + experiment.MeasureDuration("TotalDuration", func() { + record.MemoryUsage(experiment, "MemDuring") + + _, _ = k.Apply("-f", assetPath(name, "gitrepo.yaml")) + Eventually(func(g Gomega) { + err := k8sClient.Get(ctx, client.ObjectKey{ + Namespace: workspace, + Name: "bm-1-gitrepo-1-big-bundle-benchmarks-create-1-g-84854", + }, &v1alpha1.Bundle{}) + g.Expect(err).ToNot(HaveOccurred()) + }).Should(Succeed()) + }, gm.Style("{{bold}}")) + + }) + }) + + Describe("Adding 1 GitRepo results in 50 Bundles", Label("create-1-gitrepo-50-bundle"), func() { + BeforeEach(func() { + name = "create-1-gitrepo-50-bundle" + info = "creating 50 bundles from one GitRepo" + }) + + It("creates 50 bundles", func() { + DeferCleanup(func() { + _, _ = k.Delete("-f", assetPath(name, "gitrepo.yaml")) + }) + + experiment.MeasureDuration("TotalDuration", func() { + record.MemoryUsage(experiment, "MemDuring") + + _, _ = k.Apply("-f", assetPath(name, "gitrepo.yaml")) + Eventually(func(g Gomega) { + list := &v1alpha1.BundleList{} + err := k8sClient.List(ctx, list, client.InNamespace(workspace), client.MatchingLabels{ + v1alpha1.RepoLabel: "bm-1-gitrepo-50-bundle", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(len(list.Items)).To(Equal(50)) + }).Should(Succeed()) + }, gm.Style("{{bold}}")) + }) + }) + + Describe("Adding 50 GitRepos results in 50 Bundles", Label("create-50-gitrepo-50-bundle"), func() { + BeforeEach(func() { + name = "create-50-gitrepo-50-bundle" + info = "creating 50 bundles from 50 GitRepos" + }) + + It("creates 50 bundles", func() { + DeferCleanup(func() { + _, _ = k.Delete("-f", assetPath(name, "gitrepos.yaml")) + }) + + experiment.MeasureDuration("TotalDuration", func() { + record.MemoryUsage(experiment, "MemDuring") + + _, _ = k.Apply("-f", assetPath(name, "gitrepos.yaml")) + Eventually(func(g Gomega) { + list := &v1alpha1.BundleList{} + err := k8sClient.List(ctx, list, client.InNamespace(workspace), client.MatchingLabels{ + "fleet.cattle.io/group": "bm-50-gitrepo-50-bundle", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(len(list.Items)).To(Equal(50)) + }).Should(Succeed()) + }, gm.Style("{{bold}}")) + }) + }) +}) diff --git a/benchmarks/record/record.go b/benchmarks/record/record.go new file mode 100644 index 0000000000..a593b509c7 --- /dev/null +++ b/benchmarks/record/record.go @@ -0,0 +1,339 @@ +package record + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "maps" + "math/rand/v2" + "runtime" + "slices" + "strings" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + gm "github.com/onsi/gomega/gmeasure" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + + "github.com/rancher/fleet/e2e/testenv/kubectl" + "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/resource" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + k kubectl.Command + k8sClient client.Client + workspace string +) + +func Setup(w string, k8s client.Client, kcmd kubectl.Command) { + workspace = w + k8sClient = k8s + k = kcmd +} + +func MemoryUsage(experiment *gm.Experiment, name string) { + var m runtime.MemStats + runtime.ReadMemStats(&m) + experiment.RecordValue(name, float64(m.Alloc/1024/1024), gm.Precision(0), gm.Units("MB")) +} + +func ResourceCount(ctx context.Context, experiment *gm.Experiment, name string) { + n := 0 + + clusters := &v1alpha1.ClusterList{} + Expect(k8sClient.List(ctx, clusters, client.InNamespace(workspace))).To(Succeed()) + n += len(clusters.Items) + + clusterGroups := &v1alpha1.ClusterGroupList{} + Expect(k8sClient.List(ctx, clusterGroups, client.InNamespace(workspace))).To(Succeed()) + n += len(clusterGroups.Items) + + gitRepos := &v1alpha1.GitRepoList{} + Expect(k8sClient.List(ctx, gitRepos, client.InNamespace(workspace))).To(Succeed()) + n += len(gitRepos.Items) + + contents := &v1alpha1.ContentList{} + Expect(k8sClient.List(ctx, contents, client.InNamespace(workspace))).To(Succeed()) + n += len(contents.Items) + + bundles := &v1alpha1.BundleList{} + Expect(k8sClient.List(ctx, bundles, client.InNamespace(workspace))).To(Succeed()) + n += len(bundles.Items) + + deployments := &v1alpha1.BundleDeploymentList{} + Expect(k8sClient.List(ctx, deployments, client.InNamespace(workspace))).To(Succeed()) + n += len(deployments.Items) + + serviceAccounts := &corev1.ServiceAccountList{} + Expect(k8sClient.List(ctx, serviceAccounts, client.InNamespace(workspace))).To(Succeed()) + n += len(serviceAccounts.Items) + + roles := &rbacv1.RoleList{} + Expect(k8sClient.List(ctx, roles, client.InNamespace(workspace))).To(Succeed()) + n += len(roles.Items) + + roleBindings := &rbacv1.RoleBindingList{} + Expect(k8sClient.List(ctx, roleBindings, client.InNamespace(workspace))).To(Succeed()) + n += len(roleBindings.Items) + + clusterRoles := &rbacv1.ClusterRoleList{} + Expect(k8sClient.List(ctx, clusterRoles)).To(Succeed()) + n += len(clusterRoles.Items) + + clusterRoleBindings := &rbacv1.ClusterRoleBindingList{} + Expect(k8sClient.List(ctx, clusterRoleBindings)).To(Succeed()) + n += len(clusterRoleBindings.Items) + + deps := &appsv1.DeploymentList{} + Expect(k8sClient.List(ctx, deps, client.InNamespace(workspace))).To(Succeed()) + n += len(deps.Items) + + statefulSets := &appsv1.StatefulSetList{} + Expect(k8sClient.List(ctx, statefulSets, client.InNamespace(workspace))).To(Succeed()) + n += len(statefulSets.Items) + + pods := &corev1.PodList{} + Expect(k8sClient.List(ctx, pods, client.InNamespace(workspace))).To(Succeed()) + n += len(pods.Items) + + jobs := &batchv1.JobList{} + Expect(k8sClient.List(ctx, jobs, client.InNamespace(workspace))).To(Succeed()) + n += len(jobs.Items) + + services := &corev1.ServiceList{} + Expect(k8sClient.List(ctx, services, client.InNamespace(workspace))).To(Succeed()) + n += len(services.Items) + + configMaps := 
&corev1.ConfigMapList{} + Expect(k8sClient.List(ctx, configMaps, client.InNamespace(workspace))).To(Succeed()) + n += len(configMaps.Items) + + secrets := &corev1.SecretList{} + Expect(k8sClient.List(ctx, secrets, client.InNamespace(workspace))).To(Succeed()) + n += len(secrets.Items) + + volumes := &corev1.PersistentVolumeList{} + Expect(k8sClient.List(ctx, volumes)).To(Succeed()) + n += len(volumes.Items) + + ingresses := &networkingv1.IngressList{} + Expect(k8sClient.List(ctx, ingresses, client.InNamespace(workspace))).To(Succeed()) + n += len(ingresses.Items) + + namespaces := &corev1.NamespaceList{} + Expect(k8sClient.List(ctx, namespaces)).To(Succeed()) + n += len(namespaces.Items) + + experiment.RecordValue(name, float64(n), gm.Precision(0), gm.Units("resources")) +} + +func CRDCount(ctx context.Context, setup *gm.Experiment, name string) { + crds := &apiextv1.CustomResourceDefinitionList{} + Expect(k8sClient.List(ctx, crds)).To(Succeed()) + setup.RecordValue(name, float64(len(crds.Items)), gm.Precision(0), gm.Units("CRDs")) +} + +func Clusters(ctx context.Context, setup *gm.Experiment) { + clusters := &v1alpha1.ClusterList{} + Expect(k8sClient.List(ctx, clusters, client.InNamespace(workspace))).To(Succeed()) + setup.RecordValue("ClusterCount", float64(len(clusters.Items)), gm.Precision(0), gm.Units("clusters")) +} + +func Nodes(ctx context.Context, experiment *gm.Experiment) { + nodes := &corev1.NodeList{} + Expect(k8sClient.List(ctx, nodes)).To(Succeed()) + experiment.RecordValue("NodeCount", float64(len(nodes.Items)), gm.Precision(0), gm.Units("nodes")) + + var sb strings.Builder + sb.WriteString("CPU, Memory, Pods\n") + cpu := resource.NewQuantity(0, resource.DecimalSI) + mem := resource.NewQuantity(0, resource.DecimalSI) + pods := resource.NewQuantity(0, resource.DecimalSI) + images := make(map[string]struct{}) + for _, node := range nodes.Items { + cpu.Add(*node.Status.Capacity.Cpu()) + mem.Add(*node.Status.Capacity.Memory()) + pods.Add(*node.Status.Capacity.Pods()) + for _, image := range node.Status.Images { + name := "" + if len(image.Names) == 0 { + continue + } else if len(image.Names) > 1 { + name = image.Names[1] + } else { + name = image.Names[0] + } + images[name] = struct{}{} + } + sb.WriteString(fmt.Sprintf("%s, %s, %s\n", + node.Status.Capacity.Cpu().String(), + node.Status.Capacity.Memory().String(), + node.Status.Capacity.Pods().String())) + } + experiment.RecordNote(Header("Node Resources")+sb.String(), gm.Style("{{green}}")) + img := strings.Join(slices.Sorted(maps.Keys(images)), ", ") + experiment.RecordNote(Header("Images")+img, gm.Style("{{green}}")) + sumCPU, _ := cpu.AsInt64() + experiment.RecordValue("SumCPU", float64(sumCPU), gm.Precision(0), gm.Units("cores")) + sumMem, _ := mem.AsInt64() + experiment.RecordValue("SumMem", float64(sumMem/1024/1024), gm.Precision(0), gm.Units("MB")) + sumPods, _ := pods.AsInt64() + experiment.RecordValue("SumPods", float64(sumPods), gm.Precision(0), gm.Units("pods")) +} + +func Header(s string) string { + h := fmt.Sprintf("{{bold}}%s{{/}}\n", s) + h += strings.Repeat("=", len(s)) + "\n" + return h +} + +func Metrics(experiment *gm.Experiment, suffix string) { + res := map[string]float64{} + + getMetrics(res, "monitoring-fleet-controller.cattle-fleet-system.svc.cluster.local:8080/metrics", "bundle", "bundledeployment", "cluster", "clustergroup", "imagescan") + + getMetrics(res, "monitoring-gitjob.cattle-fleet-system.svc.cluster.local:8081/metrics", "GitRepoStatus", "gitrepo") + + for k, v := range res { + n := k + suffix 
+ switch k { + case "ReconcileErrors", "ReconcileRequeue", "ReconcileRequeueAfter", "ReconcileSuccess": + experiment.RecordValue(n, v, gm.Precision(0), gm.Units("reconciles")) + case "GCDuration", "CPU", "ReconcileTime", "WorkqueueQueueDuration", "WorkqueueWorkDuration": + experiment.RecordValue(n, v, gm.Precision(1), gm.Units("seconds")) + case "WorkqueueAdds", "WorkqueueRetries": + experiment.RecordValue(n, v, gm.Precision(0), gm.Units("items")) + case "NetworkRX", "NetworkTX": + experiment.RecordValue(n, v, gm.Precision(0), gm.Units("bytes")) + default: + experiment.RecordValue(n, v, gm.Precision(0)) + } + } +} + +func getMetrics(res map[string]float64, url string, controllers ...string) { + pod := addRandomSuffix("curl") + var ( + mfs map[string]*dto.MetricFamily + parser expfmt.TextParser + ) + Eventually(func() error { + GinkgoWriter.Print("Fetching metrics from " + url + "\n") + out, err := k.Run("run", "--rm", "--attach", "--quiet", "--restart=Never", pod, "--image=curlimages/curl", "--namespace", "cattle-fleet-system", "--command", "--", "curl", "-s", url) + if err != nil { + return err + } + + mfs, err = parser.TextToMetricFamilies(bytes.NewBufferString(out)) + if err != nil { + return err + } + + if _, ok := mfs["controller_runtime_reconcile_total"]; !ok { + return fmt.Errorf("controller_runtime_reconcile_total not found") + } + + return nil + }).Should(Succeed()) + + extractFromMetricFamilies(res, controllers, mfs) +} + +// addRandomSuffix adds a random suffix to a given name. +func addRandomSuffix(name string) string { + p := make([]byte, 4) + binary.LittleEndian.PutUint32(p, rand.Uint32()) + + return fmt.Sprintf("%s-%s", name, hex.EncodeToString(p)) +} + +func extractFromMetricFamilies(res map[string]float64, controllers []string, mfs map[string]*dto.MetricFamily) { + // controller_runtime_reconcile_total{controller="gitrepo",result="error"} 0 + // controller_runtime_reconcile_total{controller="gitrepo",result="requeue"} 71 + // controller_runtime_reconcile_total{controller="gitrepo",result="requeue_after"} 155 + // controller_runtime_reconcile_total{controller="gitrepo",result="success"} 267 + mf := mfs["controller_runtime_reconcile_total"] + for _, m := range mf.Metric { + l := m.GetLabel() + for _, c := range controllers { + if l[0].GetValue() == c { + v := m.Counter.GetValue() + switch l[1].GetValue() { + case "error": + res["ReconcileErrors"] += v + case "requeue": + res["ReconcileRequeue"] += v + case "requeue_after": + res["ReconcileRequeueAfter"] += v + case "success": + res["ReconcileSuccess"] += v + + } + } + } + } + + // controller_runtime_reconcile_time_seconds_sum{controller="gitrepo"} 185.52245399500018 + mf = mfs["controller_runtime_reconcile_time_seconds"] + incMetric(res, "ReconcileTime", controllers, *mf.Type, mf.Metric) + + mf = mfs["workqueue_adds_total"] + incMetric(res, "WorkqueueAdds", controllers, *mf.Type, mf.Metric) + + mf = mfs["workqueue_queue_duration_seconds"] + incMetric(res, "WorkqueueQueueDuration", controllers, *mf.Type, mf.Metric) + + mf = mfs["workqueue_retries_total"] + incMetric(res, "WorkqueueRetries", controllers, *mf.Type, mf.Metric) + + mf = mfs["workqueue_work_duration_seconds"] + incMetric(res, "WorkqueueWorkDuration", controllers, *mf.Type, mf.Metric) + + for _, m := range mfs["go_gc_duration_seconds"].Metric { + res["GCDuration"] += m.Summary.GetSampleSum() + } + + for _, m := range mfs["process_cpu_seconds_total"].Metric { + res["CPU"] += m.Counter.GetValue() + } + + for _, m := range 
mfs["process_network_receive_bytes_total"].Metric { + res["NetworkRX"] += m.Counter.GetValue() + } + + for _, m := range mfs["process_network_transmit_bytes_total"].Metric { + res["NetworkTX"] += m.Counter.GetValue() + } +} + +func incMetric(res map[string]float64, name string, controllers []string, t dto.MetricType, metrics []*dto.Metric) { + for _, m := range metrics { + l := m.GetLabel() + for _, c := range controllers { + if l[0].GetValue() == c { + switch t { + case dto.MetricType_COUNTER: + res[name] += m.Counter.GetValue() + case dto.MetricType_GAUGE: + res[name] += m.Gauge.GetValue() + case dto.MetricType_SUMMARY: + res[name] += m.Summary.GetSampleSum() + case dto.MetricType_HISTOGRAM: + res[name] += m.Histogram.GetSampleSum() + } + } + } + } +} diff --git a/benchmarks/report/report.go b/benchmarks/report/report.go new file mode 100644 index 0000000000..0b7be87577 --- /dev/null +++ b/benchmarks/report/report.go @@ -0,0 +1,244 @@ +package report + +import ( + "encoding/json" + "fmt" + "maps" + "slices" + "strings" + + "github.com/onsi/ginkgo/v2" + gm "github.com/onsi/gomega/gmeasure" + "github.com/onsi/gomega/gmeasure/table" + + "gonum.org/v1/gonum/stat" +) + +type Summary struct { + Description string + Experiments map[string]Experiment + Setup map[string]Measurement +} + +type Measurement struct { + Value float64 + Type gm.MeasurementType + PrecisionBundle gm.PrecisionBundle + Style string + Units string +} + +// Experiment is a set of measurements, like from 50-gitrepo-1-bundle +// Measurements from the report are one dimensional, as most experiments don't +// use sampling +type Experiment struct { + Measurements map[string]Measurement +} + +// ColorableString for ReportEntry to use +func (s Summary) ColorableString() string { + sb := "{{green}}Experiments{{/}}\n" + keys := slices.Sorted(maps.Keys(s.Experiments)) + for _, k := range keys { + v := s.Experiments[k] + sb += fmt.Sprintf("{{green}}%s{{/}}\n", k) + t2 := newTable(v.Measurements) + sb += t2.Render() + sb += "\n" + } + sb += "{{green}}Environment{{/}}\n" + sb += s.Description + sb += "\n" + t1 := newTable(s.Setup) + sb += t1.Render() + sb += "\n" + return sb +} + +// non-colorable String() is used by go's string formatting support but ignored by ReportEntry +func (s Summary) String() string { + return fmt.Sprintf("%s\n%s\n%s\n", s.Description, prettyPrint(s.Setup), prettyPrint(s.Experiments)) +} + +func newTable(measurements map[string]Measurement) *table.Table { + t := table.NewTable() + t.AppendRow(table.R( + table.C("Measurement"), table.C("Value"), table.C("Unit"), + table.Divider("="), + "{{bold}}", + )) + + keys := slices.Sorted(maps.Keys(measurements)) + for _, k := range keys { + m := measurements[k] + + r := table.R(m.Style) + t.AppendRow(r) + r.AppendCell(table.C(k)) + r.AppendCell(table.C(fmt.Sprintf(m.PrecisionBundle.ValueFormat, m.Value))) + r.AppendCell(table.C(m.Units)) + + } + + return t +} + +func New(r ginkgo.Report) (*Summary, bool) { + s := &Summary{ + Experiments: map[string]Experiment{}, + Setup: map[string]Measurement{}, + } + + for _, specReport := range r.SpecReports { + if specReport.Failed() { + return nil, false + } + + // handle values from actual experiments, all experiments have labels + if len(specReport.ContainerHierarchyLabels) <= 1 { + continue + } + + for _, entry := range specReport.ReportEntries { + + e := Experiment{ + Measurements: map[string]Measurement{}, + } + + raw := entry.GetRawValue() + xp, ok := raw.(*gm.Experiment) + if !ok { + fmt.Printf("failed to access report: %#v\n", 
entry) + continue + } + + for _, m := range xp.Measurements { + name, v := extract(m) + if name == "" { + continue + } + + tmp, ok := e.Measurements[name] + if ok { + tmp.Value += v + } else { + tmp = Measurement{ + Value: v, + Type: m.Type, + PrecisionBundle: m.PrecisionBundle, + Style: m.Style, + Units: m.Units, + } + } + e.Measurements[name] = tmp + } + s.Experiments[entry.Name] = e + } + } + + for _, specReport := range r.SpecReports { + if len(specReport.ContainerHierarchyLabels) > 1 { + continue + } + + // handle setup entries + for _, entry := range specReport.ReportEntries { + if entry.Name != "setup" { + continue + } + + raw := entry.GetRawValue() + xp, ok := raw.(*gm.Experiment) + if !ok { + return nil, false + } + + if xp.Name != "beforeSetup" && xp.Name != "afterSetup" { + continue + } + + for _, m := range xp.Measurements { + name, v := extract(m) + if name != "" { + tmp, ok := s.Setup[name] + if ok { + tmp.Value += v + } else { + tmp = Measurement{ + Value: v, + Type: m.Type, + PrecisionBundle: m.PrecisionBundle, + Style: m.Style, + Units: m.Units, + } + } + s.Setup[name] = tmp + } else if m.Type == gm.MeasurementTypeNote { + s.Description += "\n" + lines := strings.Split(strings.Trim(m.Note, "\n"), "\n") + for i := range lines { + s.Description += fmt.Sprintf("%s\n", lines[i]) + } + } + } + s.Description += "\n" + } + break + } + + return s, true +} + +func extract(m gm.Measurement) (string, float64) { + var v float64 + + switch m.Type { + case gm.MeasurementTypeValue: + if len(m.Values) < 1 { + return "", 0 + } + v = m.Values[0] + + case gm.MeasurementTypeDuration: + if len(m.Durations) < 1 { + return "", 0 + } + v = m.Durations[0].Seconds() + fmt.Printf("duration: %#v %f\n", m.Durations, v) + + default: + return "", 0 + } + + name := m.Name + + // MemDuring is actually sampled, not a single value + if m.Name == "MemDuring" { + v = stat.Mean(m.Values, nil) + } else if beforeAfterName(name) { + if strings.HasSuffix(m.Name, "Before") { + name = strings.TrimSuffix(m.Name, "Before") + v = -v + } else { + name = strings.TrimSuffix(m.Name, "After") + } + } + + return name, v +} + +// special handling for Before/After suffixes +func beforeAfterName(name string) bool { + if strings.HasSuffix(name, "Before") { + return true + } + if strings.HasSuffix(name, "After") { + return true + } + return false +} + +func prettyPrint(i interface{}) string { + s, _ := json.MarshalIndent(i, "", "\t") + return string(s) +} diff --git a/benchmarks/suite_test.go b/benchmarks/suite_test.go new file mode 100644 index 0000000000..2431698cb1 --- /dev/null +++ b/benchmarks/suite_test.go @@ -0,0 +1,177 @@ +package benchmarks_test + +import ( + "context" + "os" + "path" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + gm "github.com/onsi/gomega/gmeasure" + + "github.com/rancher/fleet/benchmarks/record" + "github.com/rancher/fleet/benchmarks/report" + "github.com/rancher/fleet/e2e/testenv/kubectl" + "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiruntime "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // GroupLabel is used on bundles. One cannot + // use v1alpha1.RepoLabel because fleet 0.9 deletes bundles with an + // invalid repo label. 
However, bundle labels are propagated to + // bundledeployments. + GroupLabel = "fleet.cattle.io/benchmark-group" + + // BenchmarkLabel is set to "true" on clusters that should be included + // in the benchmark. + BenchmarkLabel = "fleet.cattle.io/benchmark" +) + +var ( + ctx context.Context + cancel context.CancelFunc + + k8sClient client.Client + k kubectl.Command + + root = ".." + scheme = apiruntime.NewScheme() + + // experiments + name string + info string + experiment *gm.Experiment + + // cluster registration namespace, contains clusters + workspace string + + // metrics toggles metrics reporting, old fleet versions don't have + // metrics + metrics bool +) + +// TestBenchmarkSuite runs the benchmark suite for Fleet. +// +// Inputs for this benchmark suite via env vars: +// * cluster registration namespace, contains clusters +// * timeout for eventually +// * if metrics should be recorded +func TestBenchmarkSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Fleet Benchmark Suite") +} + +// this will run after BeforeEach, but before the actual experiment +var _ = JustBeforeEach(func() { + experiment = gm.NewExperiment(name) + AddReportEntry(experiment.Name, experiment, ReportEntryVisibilityNever) + experiment.RecordNote(record.Header("Info")+info, gm.Style("{{green}}")) + record.MemoryUsage(experiment, "MemBefore") + record.ResourceCount(ctx, experiment, "ResourceCountBefore") + if metrics { + record.Metrics(experiment, "Before") + } +}) + +// this will run after DeferClean, so clean up is not included in the measurements +var _ = AfterEach(func() { + record.MemoryUsage(experiment, "MemAfter") + record.ResourceCount(ctx, experiment, "ResourceCountAfter") + if metrics { + record.Metrics(experiment, "After") + } +}) + +var _ = BeforeSuite(func() { + metrics = os.Getenv("FLEET_BENCH_METRICS") == "true" + + tm := os.Getenv("FLEET_BENCH_TIMEOUT") + if tm == "" { + tm = "2m" + } + dur, err := time.ParseDuration(tm) + Expect(err).NotTo(HaveOccurred(), "failed to parse timeout duration: "+tm) + SetDefaultEventuallyTimeout(dur) + SetDefaultEventuallyPollingInterval(1 * time.Second) + + ctx, cancel = context.WithCancel(context.TODO()) + + workspace = os.Getenv("FLEET_BENCH_NAMESPACE") + if workspace == "" { + workspace = "fleet-local" + } + + // client for assets + k = kubectl.New("", workspace) + + // client for assertions + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(apiextv1.AddToScheme(scheme)) + + cfg := ctrl.GetConfigOrDie() + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme, Cache: nil}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + atLeastOneCluster() + + record.Setup(workspace, k8sClient, k) + + // describe the environment this suite is running against + e := gm.NewExperiment("beforeSetup") + record.MemoryUsage(e, "MemBefore") + record.ResourceCount(ctx, e, "ResourceCountBefore") + record.CRDCount(ctx, e, "CRDCount") + record.Nodes(ctx, e) + record.Clusters(ctx, e) + if metrics { + record.Metrics(e, "") + } + + version, err := k.Run("version") + Expect(err).NotTo(HaveOccurred()) + e.RecordNote(record.Header("Kubernetes Version") + version) + AddReportEntry("setup", e, ReportEntryVisibilityNever) +}) + +var _ = AfterSuite(func() { + e := gm.NewExperiment("afterSetup") + record.MemoryUsage(e, "MemAfter") + record.ResourceCount(ctx, e, "ResourceCountAfter") + AddReportEntry("setup", e, ReportEntryVisibilityNever) + + cancel() +}) + +var _ = 
ReportAfterSuite("Summary", func(r Report) { + if summary, ok := report.New(r); ok { + AddReportEntry("summary", summary) + } +}) + +// atLeastOneCluster validates that the workspace has at least one cluster. +func atLeastOneCluster() { + GinkgoHelper() + + list := &v1alpha1.ClusterList{} + err := k8sClient.List(ctx, list, client.InNamespace(workspace), client.MatchingLabels{BenchmarkLabel: "true"}) + Expect(err).ToNot(HaveOccurred(), "failed to list clusters") + Expect(len(list.Items)).To(BeNumerically(">=", 1)) +} + +// assetPath returns the path to an asset +func assetPath(p ...string) string { + parts := append([]string{root, "benchmarks", "assets"}, p...) + return path.Join(parts...) +} diff --git a/benchmarks/targeting_test.go b/benchmarks/targeting_test.go new file mode 100644 index 0000000000..194c6f29fd --- /dev/null +++ b/benchmarks/targeting_test.go @@ -0,0 +1,80 @@ +package benchmarks_test + +import ( + "github.com/rancher/fleet/benchmarks/record" + "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + + "sigs.k8s.io/controller-runtime/pkg/client" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + gm "github.com/onsi/gomega/gmeasure" +) + +// create-1-bundle +// create-50-bundle +var _ = Context("Benchmarks Targeting", func() { + var n int + + BeforeEach(func() { + clusters := &v1alpha1.ClusterList{} + Expect(k8sClient.List(ctx, clusters, client.InNamespace(workspace), client.MatchingLabels{ + "fleet.cattle.io/benchmark": "true", + })).To(Succeed()) + n = len(clusters.Items) + }) + + Describe("Adding 1 Bundle results in BundleDeployments", Label("create-1-bundle"), func() { + BeforeEach(func() { + name = "create-1-bundle" + info = "creating one bundle targeting each cluster" + }) + + It("creates one bundledeployment", func() { + DeferCleanup(func() { + _, _ = k.Delete("-f", assetPath(name, "bundle.yaml")) + }) + + experiment.MeasureDuration("TotalDuration", func() { + record.MemoryUsage(experiment, "MemDuring") + + _, _ = k.Apply("-f", assetPath(name, "bundle.yaml")) + Eventually(func(g Gomega) { + list := &v1alpha1.BundleDeploymentList{} + err := k8sClient.List(ctx, list, client.MatchingLabels{ + GroupLabel: name, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(len(list.Items)).To(Equal(n)) + }).Should(Succeed()) + }, gm.Style("{{bold}}")) + }) + }) + + Describe("Adding 50 Bundles", Label("create-50-bundle"), func() { + BeforeEach(func() { + name = "create-50-bundle" + info = "creating 50 bundles targeting each cluster" + }) + + It("creates 50 bundledeployments", func() { + DeferCleanup(func() { + _, _ = k.Delete("-f", assetPath(name, "bundles.yaml")) + }) + + experiment.MeasureDuration("TotalDuration", func() { + record.MemoryUsage(experiment, "MemDuring") + + _, _ = k.Apply("-f", assetPath(name, "bundles.yaml")) + Eventually(func(g Gomega) { + list := &v1alpha1.BundleDeploymentList{} + err := k8sClient.List(ctx, list, client.MatchingLabels{ + GroupLabel: name, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(len(list.Items)).To(Equal(n * 50)) + }).Should(Succeed()) + }, gm.Style("{{bold}}")) + }) + }) +}) diff --git a/dev/benchmarks.sh b/dev/benchmarks.sh new file mode 100755 index 0000000000..f1dac8db64 --- /dev/null +++ b/dev/benchmarks.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -e + +date=$(date +"%F_%T") +out="b-$date.json" +FLEET_BENCH_REPORT=${FLEET_BENCH_DB-$out} +FLEET_BENCH_DB=${FLEET_BENCH_DB-"benchmarks/db"} +FLEET_BENCH_TIMEOUT=${FLEET_BENCH_TIMEOUT-"5m"} +FLEET_BENCH_NAMESPACE=${FLEET_BENCH_NAMESPACE-"fleet-local"} 
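+# All FLEET_BENCH_* variables above and below use the shell's ${VAR-default}
+# form, so they can be overridden from the environment. An illustrative
+# invocation (values are examples, not requirements):
+#   FLEET_BENCH_TIMEOUT=10m FLEET_BENCH_NAMESPACE=fleet-default ./dev/benchmarks.sh
+# FLEET_BENCH_METRICS is set below and should be "false" for Fleet versions
+# that do not expose the controller metrics endpoints.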
+FLEET_BENCH_METRICS=${FLEET_BENCH_METRICS-"true"} + +export FLEET_BENCH_TIMEOUT +export FLEET_BENCH_NAMESPACE +export FLEET_BENCH_METRICS + +n=$(kubectl get clusters.fleet.cattle.io -n "$FLEET_BENCH_NAMESPACE" -l fleet.cattle.io/benchmark=true -ojson | jq '.items | length') +if [ "$n" -eq 0 ]; then + echo "No clusters found to benchmark" + echo "You need at least one cluster with the label fleet.cattle.io/benchmark=true" + echo + echo "Example:" + echo "kubectl label clusters.fleet.cattle.io -n fleet-local local fleet.cattle.io/benchmark=true" + exit 1 +fi + +date=$(date +"%F_%T") +out="b-$date.json" +ginkgo run --fail-fast --seed 1731598958 --json-report "$FLEET_BENCH_REPORT" ./benchmarks diff --git a/go.mod b/go.mod index e7d050414e..37db6c8755 100644 --- a/go.mod +++ b/go.mod @@ -49,6 +49,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/crypto v0.30.0 golang.org/x/sync v0.10.0 + gonum.org/v1/gonum v0.15.1 gopkg.in/go-playground/webhooks.v5 v5.17.0 gopkg.in/yaml.v2 v2.4.0 gotest.tools v2.2.0+incompatible @@ -223,14 +224,14 @@ require ( go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect - golang.org/x/mod v0.21.0 // indirect + golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.32.0 // indirect golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/tools v0.27.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/api v0.149.0 // indirect diff --git a/go.sum b/go.sum index 9708015a47..12e9049aae 100644 --- a/go.sum +++ b/go.sum @@ -917,8 +917,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1185,8 +1185,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1198,6 +1198,8 @@ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3j golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=