Add benchmark suite
* benchmarks
* test data
* group label instead of repo-name label with fleet 0.9.11
* toggle for metrics
* stable benchmark order via --seed (see the sketch after this list)
* check for at least one cluster
* skip confusing report table printing
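
A minimal sketch of how the --seed toggle above might be exercised, assuming the suite is driven by Ginkgo, whose CLI provides a --seed flag for deterministic spec ordering; the exact invocation is an assumption, not something this diff confirms:

    # pin the spec order so benchmark runs are comparable (hypothetical invocation)
    ginkgo run --seed 42 ./benchmarks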
manno committed Dec 3, 2024
1 parent 1ffdf9c commit 196d24c
Showing 16 changed files with 177,763 additions and 12 deletions.
325 changes: 325 additions & 0 deletions benchmarks/assets/create-1-bundle/bundle.yaml
@@ -0,0 +1,325 @@
---
apiVersion: fleet.cattle.io/v1alpha1
kind: Bundle
metadata:
  name: create-1-bundle
  labels:
    fleet.cattle.io/commit: 269ad1f41bd40bf9f2df9dec571fcc299ecf5c94
    fleet.cattle.io/bench-group: create-1-bundle
spec:
  resources:
  - content: |
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: test-config-10kb
      data:
        bootstrap.sh: |
          #!/bin/bash
          : ${HADOOP_PREFIX:=/usr/local/hadoop}
          . $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh
          # Directory to find config artifacts
          CONFIG_DIR="/tmp/hadoop-config"
          # Copy config files from volume mount
          for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do
            if [[ -e ${CONFIG_DIR}/$f ]]; then
              cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f
            else
              echo "ERROR: Could not find $f in $CONFIG_DIR"
              exit 1
            fi
          done
          # installing libraries if any - (resource urls added comma separated to the ACP system variable)
          cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd -
          if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then
            mkdir -p /root/hdfs/namenode
            $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive
            $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode
          fi
          if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then
            mkdir -p /root/hdfs/datanode
            # wait up to 30 seconds for namenode
            (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-hdfs-nn" ; sleep 2; done && [[ $count -lt 15 ]])
            [[ $? -ne 0 ]] && echo "Timeout waiting for hdfs-nn, exiting." && exit 1
            $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode
          fi
          if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then
            cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/
            cd $HADOOP_PREFIX/sbin
            chmod +x start-yarn-rm.sh
            ./start-yarn-rm.sh
          fi
          if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then
            sed -i '/<\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml
            cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM
            <property>
              <name>yarn.nodemanager.resource.memory-mb</name>
              <value>${MY_MEM_LIMIT:-2048}</value>
            </property>
            <property>
              <name>yarn.nodemanager.resource.cpu-vcores</name>
              <value>${MY_CPU_LIMIT:-2}</value>
            </property>
          EOM
            echo '</configuration>' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml
            cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/
            cd $HADOOP_PREFIX/sbin
            chmod +x start-yarn-nm.sh
            # wait up to 30 seconds for resourcemanager
            (while [[ $count -lt 15 && -z `curl -sf http://foo-hadoop-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo "Waiting for foo-hadoop-yarn-rm" ; sleep 2; done && [[ $count -lt 15 ]])
            [[ $? -ne 0 ]] && echo "Timeout waiting for yarn-rm, exiting." && exit 1
            ./start-yarn-nm.sh
          fi
          if [[ $1 == "-d" ]]; then
            until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo "`date`: Waiting for logs..." ; do sleep 2 ; done
            tail -F ${HADOOP_PREFIX}/logs/* &
            while true; do sleep 1000; done
          fi
          if [[ $1 == "-bash" ]]; then
            /bin/bash
          fi
        core-site.xml: |
          <?xml version="1.0"?>
          <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
          <configuration>
            <property>
              <name>fs.defaultFS</name>
              <value>hdfs://foo-hadoop-hdfs-nn:9000/</value>
              <description>NameNode URI</description>
            </property>
          </configuration>
        hdfs-site.xml: |
          <?xml version="1.0"?>
          <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
          <configuration>
            <property>
              <name>dfs.datanode.use.datanode.hostname</name>
              <value>false</value>
            </property>
            <property>
              <name>dfs.client.use.datanode.hostname</name>
              <value>false</value>
            </property>
            <property>
              <name>dfs.replication</name>
              <value>3</value>
            </property>
            <property>
              <name>dfs.datanode.data.dir</name>
              <value>file:///root/hdfs/datanode</value>
              <description>DataNode directory</description>
            </property>
            <property>
              <name>dfs.namenode.name.dir</name>
              <value>file:///root/hdfs/namenode</value>
              <description>NameNode directory for namespace and transaction logs storage.</description>
            </property>
            <property>
              <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
              <value>false</value>
            </property>
            <!-- Bind to all interfaces -->
            <property>
              <name>dfs.namenode.rpc-bind-host</name>
              <value>0.0.0.0</value>
            </property>
            <property>
              <name>dfs.namenode.servicerpc-bind-host</name>
              <value>0.0.0.0</value>
            </property>
            <!-- /Bind to all interfaces -->
          </configuration>
        mapred-site.xml: |
          <?xml version="1.0"?>
          <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
          <configuration>
            <property>
              <name>mapreduce.framework.name</name>
              <value>yarn</value>
            </property>
            <property>
              <name>mapreduce.jobhistory.address</name>
              <value>foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:10020</value>
            </property>
            <property>
              <name>mapreduce.jobhistory.webapp.address</name>
              <value>foo-hadoop-yarn-rm-0.foo-hadoop-yarn-rm.default.svc.cluster.local:19888</value>
            </property>
          </configuration>
        slaves: 'localhost
          '
        start-yarn-nm.sh: |
          #!/usr/bin/env bash
          # Licensed to the Apache Software Foundation (ASF) under one or more
          # contributor license agreements. See the NOTICE file distributed with
          # this work for additional information regarding copyright ownership.
          # The ASF licenses this file to You under the Apache License, Version 2.0
          # (the "License"); you may not use this file except in compliance with
          # the License. You may obtain a copy of the License at
          #
          # http://www.apache.org/licenses/LICENSE-2.0
          #
          # Unless required by applicable law or agreed to in writing, software
          # distributed under the License is distributed on an "AS IS" BASIS,
          # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          # See the License for the specific language governing permissions and
          # limitations under the License.
          # Start all yarn daemons. Run this on master node.
          echo "starting yarn daemons"
          bin=`dirname "${BASH_SOURCE-$0}"`
          bin=`cd "$bin"; pwd`
          DEFAULT_LIBEXEC_DIR="$bin"/../libexec
          HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
          . $HADOOP_LIBEXEC_DIR/yarn-config.sh
          # start resourceManager
          # "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager
          # start nodeManager
          "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager
          # start proxyserver
          #"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver
        start-yarn-rm.sh: |
          #!/usr/bin/env bash
          # Licensed to the Apache Software Foundation (ASF) under one or more
          # contributor license agreements. See the NOTICE file distributed with
          # this work for additional information regarding copyright ownership.
          # The ASF licenses this file to You under the Apache License, Version 2.0
          # (the "License"); you may not use this file except in compliance with
          # the License. You may obtain a copy of the License at
          #
          # http://www.apache.org/licenses/LICENSE-2.0
          #
          # Unless required by applicable law or agreed to in writing, software
          # distributed under the License is distributed on an "AS IS" BASIS,
          # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          # See the License for the specific language governing permissions and
          # limitations under the License.
          # Start all yarn daemons. Run this on master node.
          echo "starting yarn daemons"
          bin=`dirname "${BASH_SOURCE-$0}"`
          bin=`cd "$bin"; pwd`
          DEFAULT_LIBEXEC_DIR="$bin"/../libexec
          HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
          . $HADOOP_LIBEXEC_DIR/yarn-config.sh
          # start resourceManager
          "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager
          # start nodeManager
          # "$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager
          # start proxyserver
          "$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver
        yarn-site.xml: |
          <?xml version="1.0"?>
          <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
          <configuration>
            <property>
              <name>yarn.resourcemanager.hostname</name>
              <value>foo-hadoop-yarn-rm</value>
            </property>
            <!-- Bind to all interfaces -->
            <property>
              <name>yarn.resourcemanager.bind-host</name>
              <value>0.0.0.0</value>
            </property>
            <property>
              <name>yarn.nodemanager.bind-host</name>
              <value>0.0.0.0</value>
            </property>
            <property>
              <name>yarn.timeline-service.bind-host</name>
              <value>0.0.0.0</value>
            </property>
            <!-- /Bind to all interfaces -->
            <property>
              <name>yarn.nodemanager.vmem-check-enabled</name>
              <value>false</value>
            </property>
            <property>
              <name>yarn.nodemanager.aux-services</name>
              <value>mapreduce_shuffle</value>
            </property>
            <property>
              <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
              <value>org.apache.hadoop.mapred.ShuffleHandler</value>
            </property>
            <property>
              <description>List of directories to store localized files in.</description>
              <name>yarn.nodemanager.local-dirs</name>
              <value>/var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir</value>
            </property>
            <property>
              <description>Where to store container logs.</description>
              <name>yarn.nodemanager.log-dirs</name>
              <value>/var/log/hadoop-yarn/containers</value>
            </property>
            <property>
              <description>Where to aggregate logs to.</description>
              <name>yarn.nodemanager.remote-app-log-dir</name>
              <value>/var/log/hadoop-yarn/apps</value>
            </property>
            <property>
              <name>yarn.application.classpath</name>
              <value>
                /usr/local/hadoop/etc/hadoop,
                /usr/local/hadoop/share/hadoop/common/*,
                /usr/local/hadoop/share/hadoop/common/lib/*,
                /usr/local/hadoop/share/hadoop/hdfs/*,
                /usr/local/hadoop/share/hadoop/hdfs/lib/*,
                /usr/local/hadoop/share/hadoop/mapreduce/*,
                /usr/local/hadoop/share/hadoop/mapreduce/lib/*,
                /usr/local/hadoop/share/hadoop/yarn/*,
                /usr/local/hadoop/share/hadoop/yarn/lib/*
              </value>
            </property>
          </configuration>
    name: configmap.yaml
  targets:
  - clusterSelector:
      matchLabels:
        fleet.cattle.io/benchmark: "true"
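
Because of the clusterSelector above, this bundle is only scheduled to clusters carrying the benchmark label, which is also why the suite checks for at least one matching cluster up front. As a hedged example, a downstream cluster could be opted in like this (the cluster name and workspace namespace are assumptions):

    # label the Fleet Cluster resource so the benchmark clusterSelector matches it
    kubectl label clusters.fleet.cattle.io benchmark-cluster -n fleet-default fleet.cattle.io/benchmark=true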
