diff --git a/modules/bash/storage/fio.sh b/modules/bash/storage/fio.sh
index 23b725bbd..1358d26eb 100644
--- a/modules/bash/storage/fio.sh
+++ b/modules/bash/storage/fio.sh
@@ -26,6 +26,10 @@ run_fio_on_remote_vm() {
   local privatekey_path=$2
   local mount_point=$3
   local result_dir=$4
+  local bs=$5
+  local iodepth=$6
+  local method=$7
+  local runtime=$8
 
   mkdir -p $result_dir
 
@@ -36,36 +40,24 @@ run_fio_on_remote_vm() {
 
   local file_path="${mount_point}/benchtest"
 
-  local methods=("randread" "read" "randwrite" "write")
-  local iodepths=(1 4 8 16)
-  local blocksizes=("4k" "256k")
-
   echo "Run fio"
   set +x # disable debug output because it will mess up the output of fio
-  for method in "${methods[@]}"
-  do
-    for iodepth in "${iodepths[@]}"
-    do
-      for bs in "${blocksizes[@]}"
-      do
-        metadata_json="{\"BlockSize\": \"$bs\", \"IoDepth\": \"$iodepth\", \"Operation\": \"$method\", \"FileSize\": \"$file_size\"}"
-        echo "$metadata_json" > $result_dir/metadata-${method}-${iodepth}-${bs}.log
-        local command="sudo fio --name=benchtest --size=$file_size --filename=$file_path --direct=1 --rw=$method --ioengine=libaio --bs=$bs --iodepth=$iodepth --time_based --runtime=60 --output-format=json"
-
-        # prepare files for the actual run using fio option --create_only=1
-        setup_command="${command} --create_only=1"
-        echo "Run command: $setup_command"
-        run_ssh $privatekey_path ubuntu $egress_ip_address 2222 "$setup_command"
-        sleep 30 # wait to clean any potential throttle / cache
-
-        # execute the actual run for metrics collection
-        echo "Run command: $command"
-        run_ssh $privatekey_path ubuntu $egress_ip_address 2222 "$command" | tee $result_dir/fio-${method}-${iodepth}-${bs}.log
-        sleep 30 # wait to clean any potential throttle / cache
-      done
-    done
-  done
+
+  metadata_json="{\"BlockSize\": \"$bs\", \"IoDepth\": \"$iodepth\", \"Operation\": \"$method\", \"FileSize\": \"$file_size\"}"
+  echo "$metadata_json" > $result_dir/metadata.log
+  local command="sudo fio --name=benchtest --size=$file_size --filename=$file_path --direct=1 --rw=$method --ioengine=libaio --bs=$bs --iodepth=$iodepth --time_based --runtime=$runtime --output-format=json"
+
+  # prepare files for the actual run using fio option --create_only=1
+  setup_command="${command} --create_only=1"
+  echo "Run command: $setup_command"
+  run_ssh $privatekey_path ubuntu $egress_ip_address 2222 "$setup_command"
+  sleep 30 # wait to clean any potential throttle / cache
+
+  # execute the actual run for metrics collection
+  echo "Run command: $command"
+  run_ssh $privatekey_path ubuntu $egress_ip_address 2222 "$command" | tee $result_dir/fio.log
+  sleep 30 # wait to clean any potential throttle / cache
 
   if $DEBUG; then # re-enable debug output if DEBUG is set
     set -x
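Reviewer note: with the sweep hoisted out of `run_fio_on_remote_vm`, each call now runs exactly one block-size/iodepth/method combination for `runtime` seconds and writes fixed `metadata.log`/`fio.log` names, so callers must pass a distinct `result_dir` per combination. A minimal driver sketch, assuming `$1` is the egress IP (its declaration sits above this hunk); the key path and mount point are placeholders:

```bash
#!/bin/bash
# Hypothetical driver reproducing the old 4x4x2 sweep with the new
# signature; key path, mount point, and IP variable are placeholders.
source modules/bash/storage/fio.sh

for method in randread read randwrite write; do
  for iodepth in 1 4 8 16; do
    for bs in 4k 256k; do
      # one result_dir per combination so metadata.log/fio.log don't collide
      run_fio_on_remote_vm "$egress_ip_address" "$HOME/.ssh/id_rsa" \
        /mnt/benchmount "results/${method}-${iodepth}-${bs}" \
        "$bs" "$iodepth" "$method" 60
    done
  done
done
```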
@@ -77,189 +69,164 @@ collect_result_disk_fio() {
   local result_dir=$1
   local run_link=$2
 
-  echo "collecting fio results from $result_dir/fio-*.log"
-
-  local methods=("randread" "read" "randwrite" "write")
-  local iodepths=(1 4 8 16)
-  local blocksizes=("4k" "256k")
+  echo "collecting fio results from $result_dir/fio.log"
 
   # TODO(@guwe): add pricing
   DATA_DISK_PRICE_PER_MONTH="${DATA_DISK_PRICE_PER_MONTH:=0}"
 
-  for method in "${methods[@]}"
-  do
-    for iodepth in "${iodepths[@]}"
-    do
-      for bs in "${blocksizes[@]}"
-      do
-        metadata="$(cat $result_dir/metadata-${method}-${iodepth}-${bs}.log)"
-        result="$result_dir/fio-${method}-${iodepth}-${bs}.log"
-        echo "========= collecting ${result} ==========="
-        cat $result
-
-        read_iops_avg=$(cat $result | jq '.jobs[0].read.iops_mean')
-        read_bw_avg=$(cat $result | jq '.jobs[0].read.bw_mean')
-        read_lat_avg=$(cat $result | jq '.jobs[0].read.clat_ns.mean')
-        write_iops_avg=$(cat $result | jq '.jobs[0].write.iops_mean')
-        write_bw_avg=$(cat $result | jq '.jobs[0].write.bw_mean')
-        write_lat_avg=$(cat $result | jq '.jobs[0].write.clat_ns.mean')
-        read_lat_p50=$(cat $result | jq '.jobs[0].read.clat_ns.percentile."50.000000"')
-        read_lat_p99=$(cat $result | jq '.jobs[0].read.clat_ns.percentile."99.000000"')
-        read_lat_p999=$(cat $result | jq '.jobs[0].read.clat_ns.percentile."99.900000"')
-        write_lat_p50=$(cat $result | jq '.jobs[0].write.clat_ns.percentile."50.000000"')
-        write_lat_p99=$(cat $result | jq '.jobs[0].write.clat_ns.percentile."99.000000"')
-        write_lat_p999=$(cat $result | jq '.jobs[0].write.clat_ns.percentile."99.900000"')
-
-        data=$(jq --null-input \
-          --arg timestamp "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
-          --arg method "${method}" \
-          --arg location "$REGION" \
-          --arg vm_size "$MACHINE_TYPE" \
-          --arg run_url "$run_link" \
-          --arg cloud "$CLOUD" \
-          --arg target_iops "$TARGET_IOPS" \
-          --arg case_name "$CASE_NAME" \
-          --arg data_disk_type "$DATA_DISK_TYPE" \
-          --arg data_disk_size "$DATA_DISK_SIZE_GB" \
-          --arg data_disk_tier "$DATA_DISK_TIER" \
-          --arg data_disk_iops_rw "$DATA_DISK_IOPS_READ_WRITE" \
-          --arg data_disk_iops_r "$DATA_DISK_IOPS_READ_ONLY" \
-          --arg data_disk_mbps_rw "$DATA_DISK_MBPS_READ_WRITE" \
-          --arg data_disk_mbps_r "$DATA_DISK_MBPS_READ_ONLY" \
-          --arg data_disk_price_per_month "$DATA_DISK_PRICE_PER_MONTH" \
-          --arg read_iops_avg "$read_iops_avg" \
-          --arg read_bw_avg "$read_bw_avg" \
-          --arg read_lat_avg "$read_lat_avg" \
-          --arg write_iops_avg "$write_iops_avg" \
-          --arg write_bw_avg "$write_bw_avg" \
-          --arg write_lat_avg "$write_lat_avg" \
-          --arg read_lat_p50 "$read_lat_p50" \
-          --arg read_lat_p99 "$read_lat_p99" \
-          --arg read_lat_p999 "$read_lat_p999" \
-          --arg write_lat_p50 "$write_lat_p50" \
-          --arg write_lat_p99 "$write_lat_p99" \
-          --arg write_lat_p999 "$write_lat_p999" \
-          --arg metadata "$metadata" \
-          '{
-            timestamp: $timestamp,
-            method: $method,
-            location: $location,
-            vm_size: $vm_size,
-            run_url: $run_url,
-            cloud: $cloud,
-            target_iops: $target_iops,
-            case_name: $case_name,
-            data_disk_type: $data_disk_type,
-            data_disk_size: $data_disk_size,
-            data_disk_tier: $data_disk_tier,
-            data_disk_iops_rw: $data_disk_iops_rw,
-            data_disk_iops_r: $data_disk_iops_r,
-            data_disk_mbps_rw: $data_disk_mbps_rw,
-            data_disk_mbps_r: $data_disk_mbps_r,
-            data_disk_price_per_month: $data_disk_price_per_month,
-            read_iops_avg: $read_iops_avg,
-            read_bw_avg: $read_bw_avg,
-            read_lat_avg: $read_lat_avg,
-            write_iops_avg: $write_iops_avg,
-            write_bw_avg: $write_bw_avg,
-            write_lat_avg: $write_lat_avg,
-            read_lat_p50: $read_lat_p50,
-            read_lat_p99: $read_lat_p99,
-            read_lat_p999: $read_lat_p999,
-            write_lat_p50: $write_lat_p50,
-            write_lat_p99: $write_lat_p99,
-            write_lat_p999: $write_lat_p999,
-            metadata: $metadata
-          }')
-
-        echo $data >> $result_dir/results.json
-      done
-    done
-  done
+  metadata="$(cat $result_dir/metadata.log)"
+  result="$result_dir/fio.log"
+  echo "========= collecting ${result} ==========="
+  cat $result
+
+  read_iops_avg=$(cat $result | jq '.jobs[0].read.iops_mean')
+  read_bw_avg=$(cat $result | jq '.jobs[0].read.bw_mean')
+  read_lat_avg=$(cat $result | jq '.jobs[0].read.clat_ns.mean')
+  write_iops_avg=$(cat $result | jq '.jobs[0].write.iops_mean')
+  write_bw_avg=$(cat $result | jq '.jobs[0].write.bw_mean')
+  write_lat_avg=$(cat $result | jq '.jobs[0].write.clat_ns.mean')
+  read_lat_p50=$(cat $result | jq '.jobs[0].read.clat_ns.percentile."50.000000"')
+  read_lat_p99=$(cat $result | jq '.jobs[0].read.clat_ns.percentile."99.000000"')
+  read_lat_p999=$(cat $result | jq '.jobs[0].read.clat_ns.percentile."99.900000"')
+  write_lat_p50=$(cat $result | jq '.jobs[0].write.clat_ns.percentile."50.000000"')
+  write_lat_p99=$(cat $result | jq '.jobs[0].write.clat_ns.percentile."99.000000"')
+  write_lat_p999=$(cat $result | jq '.jobs[0].write.clat_ns.percentile."99.900000"')
+
+  data=$(jq --null-input \
+    --arg timestamp "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
+    --arg method "${method}" \
+    --arg location "$REGION" \
+    --arg vm_size "$MACHINE_TYPE" \
+    --arg run_url "$run_link" \
+    --arg cloud "$CLOUD" \
+    --arg target_iops "$TARGET_IOPS" \
+    --arg case_name "$CASE_NAME" \
+    --arg data_disk_type "$DATA_DISK_TYPE" \
+    --arg data_disk_size "$DATA_DISK_SIZE_GB" \
+    --arg data_disk_tier "$DATA_DISK_TIER" \
+    --arg data_disk_iops_rw "$DATA_DISK_IOPS_READ_WRITE" \
+    --arg data_disk_iops_r "$DATA_DISK_IOPS_READ_ONLY" \
+    --arg data_disk_mbps_rw "$DATA_DISK_MBPS_READ_WRITE" \
+    --arg data_disk_mbps_r "$DATA_DISK_MBPS_READ_ONLY" \
+    --arg data_disk_price_per_month "$DATA_DISK_PRICE_PER_MONTH" \
+    --arg read_iops_avg "$read_iops_avg" \
+    --arg read_bw_avg "$read_bw_avg" \
+    --arg read_lat_avg "$read_lat_avg" \
+    --arg write_iops_avg "$write_iops_avg" \
+    --arg write_bw_avg "$write_bw_avg" \
+    --arg write_lat_avg "$write_lat_avg" \
+    --arg read_lat_p50 "$read_lat_p50" \
+    --arg read_lat_p99 "$read_lat_p99" \
+    --arg read_lat_p999 "$read_lat_p999" \
+    --arg write_lat_p50 "$write_lat_p50" \
+    --arg write_lat_p99 "$write_lat_p99" \
+    --arg write_lat_p999 "$write_lat_p999" \
+    --arg metadata "$metadata" \
+    '{
+      timestamp: $timestamp,
+      method: $method,
+      location: $location,
+      vm_size: $vm_size,
+      run_url: $run_url,
+      cloud: $cloud,
+      target_iops: $target_iops,
+      case_name: $case_name,
+      data_disk_type: $data_disk_type,
+      data_disk_size: $data_disk_size,
+      data_disk_tier: $data_disk_tier,
+      data_disk_iops_rw: $data_disk_iops_rw,
+      data_disk_iops_r: $data_disk_iops_r,
+      data_disk_mbps_rw: $data_disk_mbps_rw,
+      data_disk_mbps_r: $data_disk_mbps_r,
+      data_disk_price_per_month: $data_disk_price_per_month,
+      read_iops_avg: $read_iops_avg,
+      read_bw_avg: $read_bw_avg,
+      read_lat_avg: $read_lat_avg,
+      write_iops_avg: $write_iops_avg,
+      write_bw_avg: $write_bw_avg,
+      write_lat_avg: $write_lat_avg,
+      read_lat_p50: $read_lat_p50,
+      read_lat_p99: $read_lat_p99,
+      read_lat_p999: $read_lat_p999,
+      write_lat_p50: $write_lat_p50,
+      write_lat_p99: $write_lat_p99,
+      write_lat_p999: $write_lat_p999,
+      metadata: $metadata
+    }')
+
+  echo $data >> $result_dir/results.json
 }
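Reviewer note: the collector still invokes `jq` twelve times over the same file. Not required by this change, but a single-pass variant is possible; a sketch assuming the same fio `--output-format=json` layout and variable names as the script above:

```bash
# Sketch: pull all twelve metrics in one jq pass (same JSON paths as above).
read -r read_iops_avg read_bw_avg read_lat_avg \
        write_iops_avg write_bw_avg write_lat_avg \
        read_lat_p50 read_lat_p99 read_lat_p999 \
        write_lat_p50 write_lat_p99 write_lat_p999 < <(jq -r '
  .jobs[0]
  | [ .read.iops_mean, .read.bw_mean, .read.clat_ns.mean,
      .write.iops_mean, .write.bw_mean, .write.clat_ns.mean,
      .read.clat_ns.percentile."50.000000",
      .read.clat_ns.percentile."99.000000",
      .read.clat_ns.percentile."99.900000",
      .write.clat_ns.percentile."50.000000",
      .write.clat_ns.percentile."99.000000",
      .write.clat_ns.percentile."99.900000" ]
  | @tsv' "$result")
```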
 
 collect_result_blob_fio() {
   local result_dir=$1
   local run_link=$2
 
-  echo "collecting fio results from $result_dir/fio-*.log"
-
-  local methods=("randread" "read" "randwrite" "write")
-  local iodepths=(1 4 8 16)
-  local blocksizes=("4k" "256k")
-
-  for method in "${methods[@]}"
-  do
-    for iodepth in "${iodepths[@]}"
-    do
-      for bs in "${blocksizes[@]}"
-      do
-        metadata="$(cat $result_dir/metadata-${method}-${iodepth}-${bs}.log)"
-        result="$result_dir/fio-${method}-${iodepth}-${bs}.log"
-        echo "========= collecting ${result} ==========="
-        cat $result
-
-        read_iops_avg=$(cat $result | jq '.jobs[0].read.iops_mean')
-        read_bw_avg=$(cat $result | jq '.jobs[0].read.bw_mean')
-        read_lat_avg=$(cat $result | jq '.jobs[0].read.clat_ns.mean')
-        write_iops_avg=$(cat $result | jq '.jobs[0].write.iops_mean')
-        write_bw_avg=$(cat $result | jq '.jobs[0].write.bw_mean')
-        write_lat_avg=$(cat $result | jq '.jobs[0].write.clat_ns.mean')
-
-        data=$(jq --null-input \
-          --arg timestamp "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
-          --arg method "$method" \
-          --arg location "$REGION" \
-          --arg vm_size "$MACHINE_TYPE" \
-          --arg run_url "$run_link" \
-          --arg cloud "$CLOUD" \
-          --arg case_name "$CASE_NAME" \
-          --arg storage_tier "$STORAGE_TIER" \
-          --arg storage_kind "$STORAGE_KIND" \
-          --arg storage_replication "$STORAGE_REPLICATION" \
-          --arg read_iops_avg "$read_iops_avg" \
-          --arg read_bw_avg "$read_bw_avg" \
-          --arg read_lat_avg "$read_lat_avg" \
-          --arg write_iops_avg "$write_iops_avg" \
-          --arg write_bw_avg "$write_bw_avg" \
-          --arg write_lat_avg "$write_lat_avg" \
-          --arg read_lat_p50 "$read_lat_p50" \
-          --arg read_lat_p99 "$read_lat_p99" \
-          --arg read_lat_p999 "$read_lat_p999" \
-          --arg write_lat_p50 "$write_lat_p50" \
-          --arg write_lat_p99 "$write_lat_p99" \
-          --arg write_lat_p999 "$write_lat_p999" \
-          --arg metadata "$metadata" \
-          '{
-            timestamp: $timestamp,
-            method: $method,
-            location: $location,
-            vm_size: $vm_size,
-            run_url: $run_url,
-            cloud: $cloud,
-            case_name: $case_name,
-            storage_tier: $storage_tier,
-            storage_kind: $storage_kind,
-            storage_replication: $storage_replication,
-            read_iops_avg: $read_iops_avg,
-            read_bw_avg: $read_bw_avg,
-            read_lat_avg: $read_lat_avg,
-            write_iops_avg: $write_iops_avg,
-            write_bw_avg: $write_bw_avg,
-            write_lat_avg: $write_lat_avg,
-            read_lat_p50: $read_lat_p50,
-            read_lat_p99: $read_lat_p99,
-            read_lat_p999: $read_lat_p999,
-            write_lat_p50: $write_lat_p50,
-            write_lat_p99: $write_lat_p99,
-            write_lat_p999: $write_lat_p999,
-            metadata: $metadata
-          }')
-
-        echo $data >> $result_dir/results.json
-      done
-    done
-  done
+  echo "collecting fio results from $result_dir/fio.log"
+
+
+  metadata="$(cat $result_dir/metadata.log)"
+  result="$result_dir/fio.log"
+  echo "========= collecting ${result} ==========="
+  cat $result
+
+  read_iops_avg=$(cat $result | jq '.jobs[0].read.iops_mean')
+  read_bw_avg=$(cat $result | jq '.jobs[0].read.bw_mean')
+  read_lat_avg=$(cat $result | jq '.jobs[0].read.clat_ns.mean')
+  write_iops_avg=$(cat $result | jq '.jobs[0].write.iops_mean')
+  write_bw_avg=$(cat $result | jq '.jobs[0].write.bw_mean')
+  write_lat_avg=$(cat $result | jq '.jobs[0].write.clat_ns.mean')
+
+  data=$(jq --null-input \
+    --arg timestamp "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
+    --arg method "$method" \
+    --arg location "$REGION" \
+    --arg vm_size "$MACHINE_TYPE" \
+    --arg run_url "$run_link" \
+    --arg cloud "$CLOUD" \
+    --arg case_name "$CASE_NAME" \
+    --arg storage_tier "$STORAGE_TIER" \
+    --arg storage_kind "$STORAGE_KIND" \
+    --arg storage_replication "$STORAGE_REPLICATION" \
+    --arg read_iops_avg "$read_iops_avg" \
+    --arg read_bw_avg "$read_bw_avg" \
+    --arg read_lat_avg "$read_lat_avg" \
+    --arg write_iops_avg "$write_iops_avg" \
+    --arg write_bw_avg "$write_bw_avg" \
+    --arg write_lat_avg "$write_lat_avg" \
+    --arg read_lat_p50 "$read_lat_p50" \
+    --arg read_lat_p99 "$read_lat_p99" \
+    --arg read_lat_p999 "$read_lat_p999" \
+    --arg write_lat_p50 "$write_lat_p50" \
+    --arg write_lat_p99 "$write_lat_p99" \
+    --arg write_lat_p999 "$write_lat_p999" \
+    --arg metadata "$metadata" \
+    '{
+      timestamp: $timestamp,
+      method: $method,
+      location: $location,
+      vm_size: $vm_size,
+      run_url: $run_url,
+      cloud: $cloud,
+      case_name: $case_name,
+      storage_tier: $storage_tier,
+      storage_kind: $storage_kind,
+      storage_replication: $storage_replication,
+      read_iops_avg: $read_iops_avg,
+      read_bw_avg: $read_bw_avg,
+      read_lat_avg: $read_lat_avg,
+      write_iops_avg: $write_iops_avg,
+      write_bw_avg: $write_bw_avg,
+      write_lat_avg: $write_lat_avg,
+      read_lat_p50: $read_lat_p50,
+      read_lat_p99: $read_lat_p99,
+      read_lat_p999: $read_lat_p999,
+      write_lat_p50: $write_lat_p50,
+      write_lat_p99: $write_lat_p99,
+      write_lat_p999: $write_lat_p999,
+      metadata: $metadata
+    }')
+
+  echo $data >> $result_dir/results.json
 }
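Reviewer note: in this collector (and the fileshare one below), `$method` and the `*_lat_p*` variables are passed to `jq --arg` but are never assigned now that the loop is gone, so they resolve to empty strings unless the caller exports them. A hedged guard sketch, deriving the operation from the metadata the runner writes and defaulting the percentiles:

```bash
# Sketch: backfill collector inputs that the removed loop used to set.
# Operation comes from the metadata.log written by run_fio_on_remote_vm.
method="${method:-$(jq -r '.Operation' "$result_dir/metadata.log")}"
: "${read_lat_p50:=0}" "${read_lat_p99:=0}" "${read_lat_p999:=0}"
: "${write_lat_p50:=0}" "${write_lat_p99:=0}" "${write_lat_p999:=0}"
```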
 
 collect_result_fileshare_fio() {
@@ -275,88 +242,75 @@ collect_result_fileshare_fio() {
     small_file_rw="0"
   fi
 
-  echo "collecting fio results from $result_dir/fio-*.log"
-
-  local methods=("randread" "read" "randwrite" "write")
-  local iodepths=(1 4 8 16)
-  local blocksizes=("4k" "256k")
-
-  for method in "${methods[@]}"
-  do
-    for iodepth in "${iodepths[@]}"
-    do
-      for bs in "${blocksizes[@]}"
-      do
-        metadata="$(cat $result_dir/metadata-${method}-${iodepth}-${bs}.log)"
-        result="$result_dir/fio-${method}-${iodepth}-${bs}.log"
-        echo "========= collecting ${result} ==========="
-        cat $result
-
-        read_iops_avg=$(cat $result | jq '.jobs[0].read.iops_mean')
-        read_bw_avg=$(cat $result | jq '.jobs[0].read.bw_mean')
-        read_lat_avg=$(cat $result | jq '.jobs[0].read.clat_ns.mean')
-        write_iops_avg=$(cat $result | jq '.jobs[0].write.iops_mean')
-        write_bw_avg=$(cat $result | jq '.jobs[0].write.bw_mean')
-        write_lat_avg=$(cat $result | jq '.jobs[0].write.clat_ns.mean')
-
-        data=$(jq --null-input \
-          --arg timestamp "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
-          --arg method "$method" \
-          --arg location "$REGION" \
-          --arg vm_size "$MACHINE_TYPE" \
-          --arg run_url "$run_link" \
-          --arg cloud "$CLOUD" \
-          --arg case_name "$CASE_NAME" \
-          --arg storage_tier "$STORAGE_TIER" \
-          --arg storage_kind "$STORAGE_KIND" \
-          --arg storage_replication "$STORAGE_REPLICATION" \
-          --arg storage_share_quota "$STORAGE_SHARE_QUOTA" \
-          --arg storage_share_enabled_protocol "$STORAGE_SHARE_ENABLED_PROTOCOL" \
-          --arg read_iops_avg "$read_iops_avg" \
-          --arg read_bw_avg "$read_bw_avg" \
-          --arg read_lat_avg "$read_lat_avg" \
-          --arg write_iops_avg "$write_iops_avg" \
-          --arg write_bw_avg "$write_bw_avg" \
-          --arg write_lat_avg "$write_lat_avg" \
-          --arg small_file_rw "$small_file_rw" \
-          --arg read_lat_p50 "$read_lat_p50" \
-          --arg read_lat_p99 "$read_lat_p99" \
-          --arg read_lat_p999 "$read_lat_p999" \
-          --arg write_lat_p50 "$write_lat_p50" \
-          --arg write_lat_p99 "$write_lat_p99" \
-          --arg write_lat_p999 "$write_lat_p999" \
-          --arg metadata "$metadata" \
-          '{
-            timestamp: $timestamp,
-            method: $method,
-            location: $location,
-            vm_size: $vm_size,
-            run_url: $run_url,
-            cloud: $cloud,
-            case_name: $case_name,
-            storage_tier: $storage_tier,
-            storage_kind: $storage_kind,
-            storage_replication: $storage_replication,
-            storage_share_quota: $storage_share_quota,
-            storage_share_enabled_protocol: $storage_share_enabled_protocol,
-            read_iops_avg: $read_iops_avg,
-            read_bw_avg: $read_bw_avg,
-            read_lat_avg: $read_lat_avg,
-            write_iops_avg: $write_iops_avg,
-            write_bw_avg: $write_bw_avg,
-            write_lat_avg: $write_lat_avg,
-            small_file_rw: $small_file_rw,
-            read_lat_p50: $read_lat_p50,
-            read_lat_p99: $read_lat_p99,
-            read_lat_p999: $read_lat_p999,
-            write_lat_p50: $write_lat_p50,
-            write_lat_p99: $write_lat_p99,
-            write_lat_p999: $write_lat_p999,
-            metadata: $metadata
-          }')
-
-        echo $data >> $result_dir/results.json
-      done
-    done
-  done
+  echo "collecting fio results from $result_dir/fio.log"
+
+  metadata="$(cat $result_dir/metadata.log)"
+  result="$result_dir/fio.log"
+  echo "========= collecting ${result} ==========="
+  cat $result
+
+  read_iops_avg=$(cat $result | jq '.jobs[0].read.iops_mean')
+  read_bw_avg=$(cat $result | jq '.jobs[0].read.bw_mean')
+  read_lat_avg=$(cat $result | jq '.jobs[0].read.clat_ns.mean')
+  write_iops_avg=$(cat $result | jq '.jobs[0].write.iops_mean')
+  write_bw_avg=$(cat $result | jq '.jobs[0].write.bw_mean')
+  write_lat_avg=$(cat $result | jq '.jobs[0].write.clat_ns.mean')
+
+  data=$(jq --null-input \
+    --arg timestamp "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
+    --arg method "$method" \
+    --arg location "$REGION" \
+    --arg vm_size "$MACHINE_TYPE" \
+    --arg run_url "$run_link" \
+    --arg cloud "$CLOUD" \
+    --arg case_name "$CASE_NAME" \
+    --arg storage_tier "$STORAGE_TIER" \
+    --arg storage_kind "$STORAGE_KIND" \
+    --arg storage_replication "$STORAGE_REPLICATION" \
+    --arg storage_share_quota "$STORAGE_SHARE_QUOTA" \
+    --arg storage_share_enabled_protocol "$STORAGE_SHARE_ENABLED_PROTOCOL" \
+    --arg read_iops_avg "$read_iops_avg" \
+    --arg read_bw_avg "$read_bw_avg" \
+    --arg read_lat_avg "$read_lat_avg" \
+    --arg write_iops_avg "$write_iops_avg" \
+    --arg write_bw_avg "$write_bw_avg" \
+    --arg write_lat_avg "$write_lat_avg" \
+    --arg small_file_rw "$small_file_rw" \
+    --arg read_lat_p50 "$read_lat_p50" \
+    --arg read_lat_p99 "$read_lat_p99" \
+    --arg read_lat_p999 "$read_lat_p999" \
+    --arg write_lat_p50 "$write_lat_p50" \
+    --arg write_lat_p99 "$write_lat_p99" \
+    --arg write_lat_p999 "$write_lat_p999" \
+    --arg metadata "$metadata" \
+    '{
+      timestamp: $timestamp,
+      method: $method,
+      location: $location,
+      vm_size: $vm_size,
+      run_url: $run_url,
+      cloud: $cloud,
+      case_name: $case_name,
+      storage_tier: $storage_tier,
+      storage_kind: $storage_kind,
+      storage_replication: $storage_replication,
+      storage_share_quota: $storage_share_quota,
+      storage_share_enabled_protocol: $storage_share_enabled_protocol,
+      read_iops_avg: $read_iops_avg,
+      read_bw_avg: $read_bw_avg,
+      read_lat_avg: $read_lat_avg,
+      write_iops_avg: $write_iops_avg,
+      write_bw_avg: $write_bw_avg,
+      write_lat_avg: $write_lat_avg,
+      small_file_rw: $small_file_rw,
+      read_lat_p50: $read_lat_p50,
+      read_lat_p99: $read_lat_p99,
+      read_lat_p999: $read_lat_p999,
+      write_lat_p50: $write_lat_p50,
+      write_lat_p99: $write_lat_p99,
+      write_lat_p999: $write_lat_p999,
+      metadata: $metadata
+    }')
+
+  echo $data >> $result_dir/results.json
 }
\ No newline at end of file
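Reviewer note: all three collectors append one JSON object per line, so `results.json` is JSON Lines rather than a single JSON document. A consumer sketch for downstream tooling that expects an array:

```bash
# results.json holds one object per line (JSON Lines); slurp into an array.
jq -s '.' "$result_dir/results.json" > "$result_dir/results-array.json"
```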
diff --git a/modules/terraform/azure/network/main.tf b/modules/terraform/azure/network/main.tf
index b804a98f8..7c7542766 100644
--- a/modules/terraform/azure/network/main.tf
+++ b/modules/terraform/azure/network/main.tf
@@ -1,11 +1,12 @@
 locals {
-  nsr_rules_map               = { for rule in var.network_config.nsr_rules : rule.name => rule }
-  vnet_name                   = var.network_config.vnet_name
-  input_subnet_map            = { for subnet in var.network_config.subnet : subnet.name => subnet }
-  subnets_map                 = { for subnet in azurerm_subnet.subnets : subnet.name => subnet }
-  network_security_group_name = var.network_config.network_security_group_name
-  nic_association_map         = { for nic in var.network_config.nic_public_ip_associations : nic.nic_name => nic }
-  tags                        = merge(var.tags, { "role" = var.network_config.role })
+  nsr_rules_map                = { for rule in var.network_config.nsr_rules : rule.name => rule }
+  nat_gateway_associations_map = var.network_config.nat_gateway_associations == null ? {} : { for nat in var.network_config.nat_gateway_associations : nat.nat_gateway_name => nat }
+  vnet_name                    = var.network_config.vnet_name
+  input_subnet_map             = { for subnet in var.network_config.subnet : subnet.name => subnet }
+  subnets_map                  = { for subnet in azurerm_subnet.subnets : subnet.name => subnet }
+  network_security_group_name  = var.network_config.network_security_group_name
+  nic_association_map          = { for nic in var.network_config.nic_public_ip_associations : nic.nic_name => nic }
+  tags                         = merge(var.tags, { "role" = var.network_config.role })
 }
 
 resource "azurerm_virtual_network" "vnet" {
@@ -77,3 +78,15 @@ module "nsr" {
   resource_group_name          = var.resource_group_name
   network_security_group_name  = azurerm_network_security_group.nsg[0].name
 }
+
+module "nat_gateway" {
+  source   = "./nat-gateway"
+  for_each = local.nat_gateway_associations_map
+
+  nat_gateway_name     = each.value.nat_gateway_name
+  location             = var.location
+  public_ip_address_id = var.public_ips[each.value.public_ip_name]
+  resource_group_name  = var.resource_group_name
+  subnet_id            = local.subnets_map[each.value.subnet_name].id
+  tags                 = local.tags
+}
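Reviewer note: keying `nat_gateway_associations_map` by `nat_gateway_name` gives `for_each` stable instance addresses, and the `== null` guard makes the module a no-op for scenarios that omit the new optional attribute. The keying expression can be checked in isolation with `terraform console` (run from any initialized working directory; the literal input here is just a sample):

```bash
# Evaluate the for-expression on a literal sample (no state required).
terraform console <<'EOF'
{ for nat in [{ nat_gateway_name = "nat-gateway", public_ip_name = "nat-gateway-pip", subnet_name = "same-subnet" }] : nat.nat_gateway_name => nat }
EOF
```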
diff --git a/modules/terraform/azure/network/nat-gateway/main.tf b/modules/terraform/azure/network/nat-gateway/main.tf
new file mode 100644
index 000000000..91a74640f
--- /dev/null
+++ b/modules/terraform/azure/network/nat-gateway/main.tf
@@ -0,0 +1,18 @@
+resource "azurerm_nat_gateway" "nat_gateway" {
+  name                = var.nat_gateway_name
+  location            = var.location
+  resource_group_name = var.resource_group_name
+  sku_name            = "Standard"
+
+  tags = var.tags
+}
+
+resource "azurerm_nat_gateway_public_ip_association" "ipng" {
+  nat_gateway_id       = azurerm_nat_gateway.nat_gateway.id
+  public_ip_address_id = var.public_ip_address_id
+}
+
+resource "azurerm_subnet_nat_gateway_association" "subnetng" {
+  subnet_id      = var.subnet_id
+  nat_gateway_id = azurerm_nat_gateway.nat_gateway.id
+}
\ No newline at end of file
diff --git a/modules/terraform/azure/network/nat-gateway/variables.tf b/modules/terraform/azure/network/nat-gateway/variables.tf
new file mode 100644
index 000000000..a398007d6
--- /dev/null
+++ b/modules/terraform/azure/network/nat-gateway/variables.tf
@@ -0,0 +1,32 @@
+variable "resource_group_name" {
+  description = "Value of the resource group name"
+  type        = string
+  default     = "rg"
+}
+
+variable "location" {
+  description = "Value of the location"
+  type        = string
+  default     = "East US"
+}
+
+variable "nat_gateway_name" {
+  description = "Value of the nat gateway name"
+  type        = string
+}
+
+variable "subnet_id" {
+  description = "Value of the subnet id"
+  type        = string
+}
+
+variable "public_ip_address_id" {
+  description = "Value of the public ip address id"
+  type        = string
+}
+
+variable "tags" {
+  type    = map(string)
+  default = {
+  }
+}
\ No newline at end of file
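Reviewer note: because the parent calls this module with `for_each`, each gateway gets an address like `module.nat_gateway["nat-gateway"]` under the network module. A hedged targeted-plan example; the `module.network` prefix is an assumption about the root module's layout, so adjust it to the actual module path:

```bash
# Plan only the NAT gateway resources; the module path is an assumption.
terraform plan \
  -var-file=scenarios/perf-eval/nat-gateway-iperf/terraform-inputs/azure.tfvars \
  -target='module.network.module.nat_gateway["nat-gateway"]'
```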
diff --git a/modules/terraform/azure/network/variables.tf b/modules/terraform/azure/network/variables.tf
index 6f4340484..bd3d261b3 100644
--- a/modules/terraform/azure/network/variables.tf
+++ b/modules/terraform/azure/network/variables.tf
@@ -50,6 +50,11 @@ variable "network_config" {
       source_address_prefix      = string
       destination_address_prefix = string
     }))
+    nat_gateway_associations = optional(list(object({
+      nat_gateway_name = string
+      public_ip_name   = string
+      subnet_name      = string
+    })))
   })
 }
 
diff --git a/modules/terraform/azure/variables.tf b/modules/terraform/azure/variables.tf
index 5b4dd35de..885651795 100644
--- a/modules/terraform/azure/variables.tf
+++ b/modules/terraform/azure/variables.tf
@@ -86,6 +86,11 @@ variable "network_config_list" {
       source_address_prefix      = string
       destination_address_prefix = string
     }))
+    nat_gateway_associations = optional(list(object({
+      nat_gateway_name = string
+      public_ip_name   = string
+      subnet_name      = string
+    })))
   }))
   default = []
 }
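Reviewer note: since the attribute is `optional(...)`, existing scenario tfvars need no updates; a scenario opts in by adding the block inside one of its `network_config_list` entries, as the new tfvars below does. An illustrative fragment (emitted via heredoc to keep this a shell example; all names here are made up):

```bash
# Illustrative only: this block belongs inside a network_config_list entry.
cat <<'EOF'
nat_gateway_associations = [{
  nat_gateway_name = "my-nat-gateway"
  public_ip_name   = "my-nat-gateway-pip"
  subnet_name      = "my-subnet"
}]
EOF
```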
variable "network_config_list" { source_address_prefix = string destination_address_prefix = string })) + nat_gateway_associations = optional(list(object({ + nat_gateway_name = string + public_ip_name = string + subnet_name = string + }))) })) default = [] } diff --git a/scenarios/perf-eval/nat-gateway-iperf/bash-scripts/client-userdata.sh b/scenarios/perf-eval/nat-gateway-iperf/bash-scripts/client-userdata.sh new file mode 100644 index 000000000..d29fe9621 --- /dev/null +++ b/scenarios/perf-eval/nat-gateway-iperf/bash-scripts/client-userdata.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +sudo perl -pi -e 's/^#?Port 22$/Port 2222/' /etc/ssh/sshd_config +sudo service ssh restart + +sudo apt-get update && sudo apt-get install iperf -y \ No newline at end of file diff --git a/scenarios/perf-eval/nat-gateway-iperf/bash-scripts/server-userdata.sh b/scenarios/perf-eval/nat-gateway-iperf/bash-scripts/server-userdata.sh new file mode 100644 index 000000000..c579d6a14 --- /dev/null +++ b/scenarios/perf-eval/nat-gateway-iperf/bash-scripts/server-userdata.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +sudo perl -pi -e 's/^#?Port 22$/Port 2222/' /etc/ssh/sshd_config +sudo service ssh restart + +sudo apt-get update && sudo apt-get install iperf -y + +nohup iperf --server --port 20001 &> /dev/null & +nohup iperf --server --udp --port 20002 &> /dev/null & \ No newline at end of file diff --git a/scenarios/perf-eval/nat-gateway-iperf/terraform-inputs/azure.tfvars b/scenarios/perf-eval/nat-gateway-iperf/terraform-inputs/azure.tfvars new file mode 100644 index 000000000..5f4450a48 --- /dev/null +++ b/scenarios/perf-eval/nat-gateway-iperf/terraform-inputs/azure.tfvars @@ -0,0 +1,111 @@ +scenario_type = "perf-eval" +scenario_name = "nat-gateway-iperf" +deletion_delay = "2h" +public_ip_config_list = [ + { + name = "server-vm-pip" + }, + { + name = "client-vm-pip" + }, + { + name = "nat-gateway-pip" + } +] +network_config_list = [ + { + role = "network" + vnet_name = "same-vnet" + vnet_address_space = "10.2.0.0/16" + subnet = [{ + name = "same-subnet" + address_prefix = "10.2.1.0/24" + }] + network_security_group_name = "same-nsg" + nic_public_ip_associations = [ + { + nic_name = "server-nic" + subnet_name = "same-subnet" + ip_configuration_name = "server-ipconfig" + public_ip_name = "server-vm-pip" + }, + { + nic_name = "client-nic" + subnet_name = "same-subnet" + ip_configuration_name = "client-ipconfig" + public_ip_name = "client-vm-pip" + } + ] + nsr_rules = [{ + name = "nsr-ssh" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "2222" + source_address_prefix = "*" + destination_address_prefix = "*" + }, + { + name = "nsr-tcp" + priority = 101 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "20001-20001" + source_address_prefix = "*" + destination_address_prefix = "*" + }, + { + name = "nsr-udp" + priority = 102 + direction = "Inbound" + access = "Allow" + protocol = "Udp" + source_port_range = "*" + destination_port_range = "20002-20002" + source_address_prefix = "*" + destination_address_prefix = "*" + } + ], + nat_gateway_associations = [{ + nat_gateway_name = "nat-gateway" + subnet_name = "same-subnet" + public_ip_name = "nat-gateway-pip" + }] + } +] +loadbalancer_config_list = [] +vm_config_list = [{ + role = "client" + vm_name = "client-vm" + nic_name = "client-nic" + admin_username = "ubuntu" + zone = "1" + source_image_reference = { + publisher = "Canonical" + offer = 
"0001-com-ubuntu-server-focal" + sku = "20_04-lts" + version = "latest" + } + create_vm_extension = true + }, + { + role = "server" + vm_name = "server-vm" + nic_name = "server-nic" + admin_username = "ubuntu" + zone = "2" + source_image_reference = { + publisher = "Canonical" + offer = "0001-com-ubuntu-server-focal" + sku = "20_04-lts" + version = "latest" + } + create_vm_extension = true + } +] +vmss_config_list = [] +nic_backend_pool_association_list = [] \ No newline at end of file diff --git a/scenarios/perf-eval/nat-gateway-iperf/terraform-test-inputs/azure.json b/scenarios/perf-eval/nat-gateway-iperf/terraform-test-inputs/azure.json new file mode 100644 index 000000000..ee64de2ac --- /dev/null +++ b/scenarios/perf-eval/nat-gateway-iperf/terraform-test-inputs/azure.json @@ -0,0 +1,8 @@ +{ + "owner" : "terraform_unit_tests", + "run_id" : "123456789", + "region" : "eastus", + "machine_type" : "Standard_D16_v5", + "zone" : "1", + "accelerated_networking" : true + } \ No newline at end of file